1 //===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that ARM uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "ARMISelLowering.h"
15 #include "ARMBaseInstrInfo.h"
16 #include "ARMBaseRegisterInfo.h"
17 #include "ARMCallingConv.h"
18 #include "ARMConstantPoolValue.h"
19 #include "ARMMachineFunctionInfo.h"
20 #include "ARMPerfectShuffle.h"
21 #include "ARMRegisterInfo.h"
22 #include "ARMSelectionDAGInfo.h"
23 #include "ARMSubtarget.h"
24 #include "MCTargetDesc/ARMAddressingModes.h"
25 #include "MCTargetDesc/ARMBaseInfo.h"
26 #include "Utils/ARMBaseInfo.h"
27 #include "llvm/ADT/APFloat.h"
28 #include "llvm/ADT/APInt.h"
29 #include "llvm/ADT/ArrayRef.h"
30 #include "llvm/ADT/BitVector.h"
31 #include "llvm/ADT/DenseMap.h"
32 #include "llvm/ADT/STLExtras.h"
33 #include "llvm/ADT/SmallPtrSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringExtras.h"
37 #include "llvm/ADT/StringRef.h"
38 #include "llvm/ADT/StringSwitch.h"
39 #include "llvm/ADT/Triple.h"
40 #include "llvm/ADT/Twine.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/CodeGen/CallingConvLower.h"
43 #include "llvm/CodeGen/ISDOpcodes.h"
44 #include "llvm/CodeGen/IntrinsicLowering.h"
45 #include "llvm/CodeGen/MachineBasicBlock.h"
46 #include "llvm/CodeGen/MachineConstantPool.h"
47 #include "llvm/CodeGen/MachineFrameInfo.h"
48 #include "llvm/CodeGen/MachineFunction.h"
49 #include "llvm/CodeGen/MachineInstr.h"
50 #include "llvm/CodeGen/MachineInstrBuilder.h"
51 #include "llvm/CodeGen/MachineJumpTableInfo.h"
52 #include "llvm/CodeGen/MachineMemOperand.h"
53 #include "llvm/CodeGen/MachineOperand.h"
54 #include "llvm/CodeGen/MachineRegisterInfo.h"
55 #include "llvm/CodeGen/RuntimeLibcalls.h"
56 #include "llvm/CodeGen/SelectionDAG.h"
57 #include "llvm/CodeGen/SelectionDAGNodes.h"
58 #include "llvm/CodeGen/TargetInstrInfo.h"
59 #include "llvm/CodeGen/TargetLowering.h"
60 #include "llvm/CodeGen/TargetOpcodes.h"
61 #include "llvm/CodeGen/TargetRegisterInfo.h"
62 #include "llvm/CodeGen/TargetSubtargetInfo.h"
63 #include "llvm/CodeGen/ValueTypes.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/CallingConv.h"
66 #include "llvm/IR/Constant.h"
67 #include "llvm/IR/Constants.h"
68 #include "llvm/IR/DataLayout.h"
69 #include "llvm/IR/DebugLoc.h"
70 #include "llvm/IR/DerivedTypes.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GlobalAlias.h"
73 #include "llvm/IR/GlobalValue.h"
74 #include "llvm/IR/GlobalVariable.h"
75 #include "llvm/IR/IRBuilder.h"
76 #include "llvm/IR/InlineAsm.h"
77 #include "llvm/IR/Instruction.h"
78 #include "llvm/IR/Instructions.h"
79 #include "llvm/IR/IntrinsicInst.h"
80 #include "llvm/IR/Intrinsics.h"
81 #include "llvm/IR/Module.h"
82 #include "llvm/IR/PatternMatch.h"
83 #include "llvm/IR/Type.h"
84 #include "llvm/IR/User.h"
85 #include "llvm/IR/Value.h"
86 #include "llvm/MC/MCInstrDesc.h"
87 #include "llvm/MC/MCInstrItineraries.h"
88 #include "llvm/MC/MCRegisterInfo.h"
89 #include "llvm/MC/MCSchedule.h"
90 #include "llvm/Support/AtomicOrdering.h"
91 #include "llvm/Support/BranchProbability.h"
92 #include "llvm/Support/Casting.h"
93 #include "llvm/Support/CodeGen.h"
94 #include "llvm/Support/CommandLine.h"
95 #include "llvm/Support/Compiler.h"
96 #include "llvm/Support/Debug.h"
97 #include "llvm/Support/ErrorHandling.h"
98 #include "llvm/Support/KnownBits.h"
99 #include "llvm/Support/MachineValueType.h"
100 #include "llvm/Support/MathExtras.h"
101 #include "llvm/Support/raw_ostream.h"
102 #include "llvm/Target/TargetMachine.h"
103 #include "llvm/Target/TargetOptions.h"
104 #include <algorithm>
105 #include <cassert>
106 #include <cstdint>
107 #include <cstdlib>
108 #include <iterator>
109 #include <limits>
110 #include <string>
111 #include <tuple>
112 #include <utility>
113 #include <vector>
114 
using namespace llvm;
using namespace llvm::PatternMatch;

// Debug category used by LLVM_DEBUG and the -debug-only= filter in this file.
#define DEBUG_TYPE "arm-isel"

// -stats counters for interesting lowering events.
STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
  "Number of constants with their storage promoted into constant pools");

// Debug knob: lets ARM/Thumb interworking support be switched off.
static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

// Controls for promoting unnamed_addr constants into the constant pool.
// Off by default until the referenced PR is resolved.
static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
// Per-constant size cap (in bytes) for constant-pool promotion.
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
// Aggregate size cap (in bytes) across all promoted constants.
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
149 
150 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
151                                        MVT PromotedBitwiseVT) {
152   if (VT != PromotedLdStVT) {
153     setOperationAction(ISD::LOAD, VT, Promote);
154     AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
155 
156     setOperationAction(ISD::STORE, VT, Promote);
157     AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
158   }
159 
160   MVT ElemTy = VT.getVectorElementType();
161   if (ElemTy != MVT::f64)
162     setOperationAction(ISD::SETCC, VT, Custom);
163   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
164   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
165   if (ElemTy == MVT::i32) {
166     setOperationAction(ISD::SINT_TO_FP, VT, Custom);
167     setOperationAction(ISD::UINT_TO_FP, VT, Custom);
168     setOperationAction(ISD::FP_TO_SINT, VT, Custom);
169     setOperationAction(ISD::FP_TO_UINT, VT, Custom);
170   } else {
171     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
172     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
173     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
174     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
175   }
176   setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
177   setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
178   setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
179   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
180   setOperationAction(ISD::SELECT,            VT, Expand);
181   setOperationAction(ISD::SELECT_CC,         VT, Expand);
182   setOperationAction(ISD::VSELECT,           VT, Expand);
183   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
184   if (VT.isInteger()) {
185     setOperationAction(ISD::SHL, VT, Custom);
186     setOperationAction(ISD::SRA, VT, Custom);
187     setOperationAction(ISD::SRL, VT, Custom);
188   }
189 
190   // Promote all bit-wise operations.
191   if (VT.isInteger() && VT != PromotedBitwiseVT) {
192     setOperationAction(ISD::AND, VT, Promote);
193     AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
194     setOperationAction(ISD::OR,  VT, Promote);
195     AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
196     setOperationAction(ISD::XOR, VT, Promote);
197     AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
198   }
199 
200   // Neon does not support vector divide/remainder operations.
201   setOperationAction(ISD::SDIV, VT, Expand);
202   setOperationAction(ISD::UDIV, VT, Expand);
203   setOperationAction(ISD::FDIV, VT, Expand);
204   setOperationAction(ISD::SREM, VT, Expand);
205   setOperationAction(ISD::UREM, VT, Expand);
206   setOperationAction(ISD::FREM, VT, Expand);
207 
208   if (!VT.isFloatingPoint() &&
209       VT != MVT::v2i64 && VT != MVT::v1i64)
210     for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
211       setOperationAction(Opcode, VT, Legal);
212 }
213 
/// Register \p VT as a 64-bit NEON vector type living in the D registers,
/// promoting unsupported loads/stores to f64 and bitwise ops to v2i32.
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}
218 
/// Register \p VT as a 128-bit NEON vector type. Q registers are modeled as
/// pairs of D registers (DPairRegClass); unsupported loads/stores are
/// promoted to v2f64 and bitwise ops to v4i32.
void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}
223 
224 void ARMTargetLowering::setAllExpand(MVT VT) {
225   for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
226     setOperationAction(Opc, VT, Expand);
227 
228   // We support these really simple operations even on types where all
229   // the actual arithmetic has to be broken down into simpler
230   // operations or turned into library calls.
231   setOperationAction(ISD::BITCAST, VT, Legal);
232   setOperationAction(ISD::LOAD, VT, Legal);
233   setOperationAction(ISD::STORE, VT, Legal);
234   setOperationAction(ISD::UNDEF, VT, Legal);
235 }
236 
237 void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
238                                        LegalizeAction Action) {
239   setLoadExtAction(ISD::EXTLOAD,  From, To, Action);
240   setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
241   setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
242 }
243 
/// Register the MVE vector types and configure their legalizer actions.
/// \p HasMVEFP indicates whether the MVE floating-point extension is present;
/// without it, FP vector types are only supported for data movement
/// (bitcast/load/store), not arithmetic.
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::QPRRegClass);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);

    // No native support for these.
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);

    // Without MVE.fp, int <-> fp vector conversions must also be expanded.
    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::QPRRegClass);
    // Integer-only MVE: expand everything first, then selectively re-enable
    // the data-movement operations below. Order matters here.
    if (!HasMVEFP)
      setAllExpand(VT);

    // These are legal or custom whether we have MVE.fp or not
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);

    if (HasMVEFP) {
      // No native support for these.
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
    }
  }

  // We 'support' these types up to bitcast/load/store level, regardless of
  // MVE integer-only / float support. Only doing FP data processing on the FP
  // vector types is inhibited at integer-only level.
  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::QPRRegClass);
    setAllExpand(VT);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  }

  // Extending loads are legal from v8i8 to v8i16, and from v4i8/v4i16 to
  // v4i32.
  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  // Some truncating stores are legal too.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8,  Legal);
}
321 
322 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
323                                      const ARMSubtarget &STI)
324     : TargetLowering(TM), Subtarget(&STI) {
325   RegInfo = Subtarget->getRegisterInfo();
326   Itins = Subtarget->getInstrItineraryData();
327 
328   setBooleanContents(ZeroOrOneBooleanContent);
329   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
330 
331   if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
332       !Subtarget->isTargetWatchOS()) {
333     bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
334     for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
335       setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
336                             IsHFTarget ? CallingConv::ARM_AAPCS_VFP
337                                        : CallingConv::ARM_AAPCS);
338   }
339 
340   if (Subtarget->isTargetMachO()) {
341     // Uses VFP for Thumb libfuncs if available.
342     if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
343         Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
344       static const struct {
345         const RTLIB::Libcall Op;
346         const char * const Name;
347         const ISD::CondCode Cond;
348       } LibraryCalls[] = {
349         // Single-precision floating-point arithmetic.
350         { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
351         { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
352         { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
353         { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },
354 
355         // Double-precision floating-point arithmetic.
356         { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
357         { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
358         { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
359         { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },
360 
361         // Single-precision comparisons.
362         { RTLIB::OEQ_F32, "__eqsf2vfp",    ISD::SETNE },
363         { RTLIB::UNE_F32, "__nesf2vfp",    ISD::SETNE },
364         { RTLIB::OLT_F32, "__ltsf2vfp",    ISD::SETNE },
365         { RTLIB::OLE_F32, "__lesf2vfp",    ISD::SETNE },
366         { RTLIB::OGE_F32, "__gesf2vfp",    ISD::SETNE },
367         { RTLIB::OGT_F32, "__gtsf2vfp",    ISD::SETNE },
368         { RTLIB::UO_F32,  "__unordsf2vfp", ISD::SETNE },
369         { RTLIB::O_F32,   "__unordsf2vfp", ISD::SETEQ },
370 
371         // Double-precision comparisons.
372         { RTLIB::OEQ_F64, "__eqdf2vfp",    ISD::SETNE },
373         { RTLIB::UNE_F64, "__nedf2vfp",    ISD::SETNE },
374         { RTLIB::OLT_F64, "__ltdf2vfp",    ISD::SETNE },
375         { RTLIB::OLE_F64, "__ledf2vfp",    ISD::SETNE },
376         { RTLIB::OGE_F64, "__gedf2vfp",    ISD::SETNE },
377         { RTLIB::OGT_F64, "__gtdf2vfp",    ISD::SETNE },
378         { RTLIB::UO_F64,  "__unorddf2vfp", ISD::SETNE },
379         { RTLIB::O_F64,   "__unorddf2vfp", ISD::SETEQ },
380 
381         // Floating-point to integer conversions.
382         // i64 conversions are done via library routines even when generating VFP
383         // instructions, so use the same ones.
384         { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp",    ISD::SETCC_INVALID },
385         { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
386         { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp",    ISD::SETCC_INVALID },
387         { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },
388 
389         // Conversions between floating types.
390         { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp",  ISD::SETCC_INVALID },
391         { RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp", ISD::SETCC_INVALID },
392 
393         // Integer to floating-point conversions.
394         // i64 conversions are done via library routines even when generating VFP
395         // instructions, so use the same ones.
396         // FIXME: There appears to be some naming inconsistency in ARM libgcc:
397         // e.g., __floatunsidf vs. __floatunssidfvfp.
398         { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp",    ISD::SETCC_INVALID },
399         { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
400         { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp",    ISD::SETCC_INVALID },
401         { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
402       };
403 
404       for (const auto &LC : LibraryCalls) {
405         setLibcallName(LC.Op, LC.Name);
406         if (LC.Cond != ISD::SETCC_INVALID)
407           setCmpLibcallCC(LC.Op, LC.Cond);
408       }
409     }
410   }
411 
412   // These libcalls are not available in 32-bit.
413   setLibcallName(RTLIB::SHL_I128, nullptr);
414   setLibcallName(RTLIB::SRL_I128, nullptr);
415   setLibcallName(RTLIB::SRA_I128, nullptr);
416 
417   // RTLIB
418   if (Subtarget->isAAPCS_ABI() &&
419       (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
420        Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
421     static const struct {
422       const RTLIB::Libcall Op;
423       const char * const Name;
424       const CallingConv::ID CC;
425       const ISD::CondCode Cond;
426     } LibraryCalls[] = {
427       // Double-precision floating-point arithmetic helper functions
428       // RTABI chapter 4.1.2, Table 2
429       { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
430       { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
431       { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
432       { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
433 
434       // Double-precision floating-point comparison helper functions
435       // RTABI chapter 4.1.2, Table 3
436       { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
437       { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
438       { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
439       { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
440       { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
441       { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
442       { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
443       { RTLIB::O_F64,   "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
444 
445       // Single-precision floating-point arithmetic helper functions
446       // RTABI chapter 4.1.2, Table 4
447       { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
448       { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
449       { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
450       { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
451 
452       // Single-precision floating-point comparison helper functions
453       // RTABI chapter 4.1.2, Table 5
454       { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
455       { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
456       { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
457       { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
458       { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
459       { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
460       { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
461       { RTLIB::O_F32,   "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
462 
463       // Floating-point to integer conversions.
464       // RTABI chapter 4.1.2, Table 6
465       { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
466       { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
467       { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
468       { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
469       { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
470       { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
471       { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
472       { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
473 
474       // Conversions between floating types.
475       // RTABI chapter 4.1.2, Table 7
476       { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
477       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
478       { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
479 
480       // Integer to floating-point conversions.
481       // RTABI chapter 4.1.2, Table 8
482       { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
483       { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
484       { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
485       { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
486       { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
487       { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
488       { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
489       { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
490 
491       // Long long helper functions
492       // RTABI chapter 4.2, Table 9
493       { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
494       { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
495       { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
496       { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
497 
498       // Integer division functions
499       // RTABI chapter 4.3.1
500       { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
501       { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
502       { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
503       { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
504       { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
505       { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
506       { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
507       { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
508     };
509 
510     for (const auto &LC : LibraryCalls) {
511       setLibcallName(LC.Op, LC.Name);
512       setLibcallCallingConv(LC.Op, LC.CC);
513       if (LC.Cond != ISD::SETCC_INVALID)
514         setCmpLibcallCC(LC.Op, LC.Cond);
515     }
516 
517     // EABI dependent RTLIB
518     if (TM.Options.EABIVersion == EABI::EABI4 ||
519         TM.Options.EABIVersion == EABI::EABI5) {
520       static const struct {
521         const RTLIB::Libcall Op;
522         const char *const Name;
523         const CallingConv::ID CC;
524         const ISD::CondCode Cond;
525       } MemOpsLibraryCalls[] = {
526         // Memory operations
527         // RTABI chapter 4.3.4
528         { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
529         { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
530         { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
531       };
532 
533       for (const auto &LC : MemOpsLibraryCalls) {
534         setLibcallName(LC.Op, LC.Name);
535         setLibcallCallingConv(LC.Op, LC.CC);
536         if (LC.Cond != ISD::SETCC_INVALID)
537           setCmpLibcallCC(LC.Op, LC.Cond);
538       }
539     }
540   }
541 
542   if (Subtarget->isTargetWindows()) {
543     static const struct {
544       const RTLIB::Libcall Op;
545       const char * const Name;
546       const CallingConv::ID CC;
547     } LibraryCalls[] = {
548       { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
549       { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
550       { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
551       { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
552       { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
553       { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
554       { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
555       { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
556     };
557 
558     for (const auto &LC : LibraryCalls) {
559       setLibcallName(LC.Op, LC.Name);
560       setLibcallCallingConv(LC.Op, LC.CC);
561     }
562   }
563 
564   // Use divmod compiler-rt calls for iOS 5.0 and later.
565   if (Subtarget->isTargetMachO() &&
566       !(Subtarget->isTargetIOS() &&
567         Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
568     setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
569     setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
570   }
571 
572   // The half <-> float conversion functions are always soft-float on
573   // non-watchos platforms, but are needed for some targets which use a
574   // hard-float calling convention by default.
575   if (!Subtarget->isTargetWatchABI()) {
576     if (Subtarget->isAAPCS_ABI()) {
577       setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
578       setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
579       setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
580     } else {
581       setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
582       setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
583       setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
584     }
585   }
586 
587   // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
588   // a __gnu_ prefix (which is the default).
589   if (Subtarget->isTargetAEABI()) {
590     static const struct {
591       const RTLIB::Libcall Op;
592       const char * const Name;
593       const CallingConv::ID CC;
594     } LibraryCalls[] = {
595       { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
596       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
597       { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
598     };
599 
600     for (const auto &LC : LibraryCalls) {
601       setLibcallName(LC.Op, LC.Name);
602       setLibcallCallingConv(LC.Op, LC.CC);
603     }
604   }
605 
606   if (Subtarget->isThumb1Only())
607     addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
608   else
609     addRegisterClass(MVT::i32, &ARM::GPRRegClass);
610 
611   if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
612       Subtarget->hasFPRegs()) {
613     addRegisterClass(MVT::f32, &ARM::SPRRegClass);
614     addRegisterClass(MVT::f64, &ARM::DPRRegClass);
615     if (!Subtarget->hasVFP2Base())
616       setAllExpand(MVT::f32);
617     if (!Subtarget->hasFP64())
618       setAllExpand(MVT::f64);
619   }
620 
621   if (Subtarget->hasFullFP16()) {
622     addRegisterClass(MVT::f16, &ARM::HPRRegClass);
623     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
624     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
625     setOperationAction(ISD::BITCAST, MVT::f16, Custom);
626 
627     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
628     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
629   }
630 
631   for (MVT VT : MVT::vector_valuetypes()) {
632     for (MVT InnerVT : MVT::vector_valuetypes()) {
633       setTruncStoreAction(VT, InnerVT, Expand);
634       addAllExtLoads(VT, InnerVT, Expand);
635     }
636 
637     setOperationAction(ISD::MULHS, VT, Expand);
638     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
639     setOperationAction(ISD::MULHU, VT, Expand);
640     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
641 
642     setOperationAction(ISD::BSWAP, VT, Expand);
643   }
644 
645   setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
646   setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
647 
648   setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
649   setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);
650 
651   if (Subtarget->hasMVEIntegerOps())
652     addMVEVectorTypes(Subtarget->hasMVEFloatOps());
653 
654   // Combine low-overhead loop intrinsics so that we can lower i1 types.
655   if (Subtarget->hasLOB())
656     setTargetDAGCombine(ISD::BRCOND);
657 
658   if (Subtarget->hasNEON()) {
659     addDRTypeForNEON(MVT::v2f32);
660     addDRTypeForNEON(MVT::v8i8);
661     addDRTypeForNEON(MVT::v4i16);
662     addDRTypeForNEON(MVT::v2i32);
663     addDRTypeForNEON(MVT::v1i64);
664 
665     addQRTypeForNEON(MVT::v4f32);
666     addQRTypeForNEON(MVT::v2f64);
667     addQRTypeForNEON(MVT::v16i8);
668     addQRTypeForNEON(MVT::v8i16);
669     addQRTypeForNEON(MVT::v4i32);
670     addQRTypeForNEON(MVT::v2i64);
671 
672     if (Subtarget->hasFullFP16()) {
673       addQRTypeForNEON(MVT::v8f16);
674       addDRTypeForNEON(MVT::v4f16);
675     }
676   }
677 
678   if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
679     // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
680     // none of Neon, MVE or VFP supports any arithmetic operations on it.
681     setOperationAction(ISD::FADD, MVT::v2f64, Expand);
682     setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
683     setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
684     // FIXME: Code duplication: FDIV and FREM are expanded always, see
685     // ARMTargetLowering::addTypeForNEON method for details.
686     setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
687     setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a case where "copysign" appears in the DAG with
    // vector operands.
691     setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
692     // FIXME: Code duplication: SETCC has custom operation action, see
693     // ARMTargetLowering::addTypeForNEON method for details.
694     setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
695     // FIXME: Create unittest for FNEG and for FABS.
696     setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
697     setOperationAction(ISD::FABS, MVT::v2f64, Expand);
698     setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
699     setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
700     setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
701     setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
702     setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
703     setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
704     setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
705     setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
706     setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
707     // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
708     setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
709     setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
710     setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
711     setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
712     setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
713     setOperationAction(ISD::FMA, MVT::v2f64, Expand);
714   }
715 
716   if (Subtarget->hasNEON()) {
717     // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively
718     // supported for v4f32.
719     setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
720     setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
721     setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
722     setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
723     setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
724     setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
725     setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
726     setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
727     setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
728     setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
729     setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
730     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
731     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
732     setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
733 
734     // Mark v2f32 intrinsics.
735     setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
736     setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
737     setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
738     setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
739     setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
740     setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
741     setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
742     setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
743     setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
744     setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
745     setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
746     setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
747     setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
748     setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
749 
750     // Neon does not support some operations on v1i64 and v2i64 types.
751     setOperationAction(ISD::MUL, MVT::v1i64, Expand);
752     // Custom handling for some quad-vector types to detect VMULL.
753     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
754     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
755     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
756     // Custom handling for some vector types to avoid expensive expansions
757     setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
758     setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
759     setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
760     setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
761     // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
762     // a destination type that is wider than the source, and nor does
763     // it have a FP_TO_[SU]INT instruction with a narrower destination than
764     // source.
765     setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
766     setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
767     setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
768     setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
769     setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
770     setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
771     setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
772     setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
773 
774     setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
775     setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);
776 
777     // NEON does not have single instruction CTPOP for vectors with element
778     // types wider than 8-bits.  However, custom lowering can leverage the
779     // v8i8/v16i8 vcnt instruction.
780     setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
781     setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
782     setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
783     setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
784     setOperationAction(ISD::CTPOP,      MVT::v1i64, Custom);
785     setOperationAction(ISD::CTPOP,      MVT::v2i64, Custom);
786 
787     setOperationAction(ISD::CTLZ,       MVT::v1i64, Expand);
788     setOperationAction(ISD::CTLZ,       MVT::v2i64, Expand);
789 
790     // NEON does not have single instruction CTTZ for vectors.
791     setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
792     setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
793     setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
794     setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
795 
796     setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
797     setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
798     setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
799     setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);
800 
801     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
802     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
803     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
804     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);
805 
806     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
807     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
808     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
809     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
810 
811     // NEON only has FMA instructions as of VFP4.
812     if (!Subtarget->hasVFP4Base()) {
813       setOperationAction(ISD::FMA, MVT::v2f32, Expand);
814       setOperationAction(ISD::FMA, MVT::v4f32, Expand);
815     }
816 
817     setTargetDAGCombine(ISD::INTRINSIC_VOID);
818     setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
819     setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
820     setTargetDAGCombine(ISD::SHL);
821     setTargetDAGCombine(ISD::SRL);
822     setTargetDAGCombine(ISD::SRA);
823     setTargetDAGCombine(ISD::SIGN_EXTEND);
824     setTargetDAGCombine(ISD::ZERO_EXTEND);
825     setTargetDAGCombine(ISD::ANY_EXTEND);
826     setTargetDAGCombine(ISD::STORE);
827     setTargetDAGCombine(ISD::FP_TO_SINT);
828     setTargetDAGCombine(ISD::FP_TO_UINT);
829     setTargetDAGCombine(ISD::FDIV);
830     setTargetDAGCombine(ISD::LOAD);
831 
832     // It is legal to extload from v4i8 to v4i16 or v4i32.
833     for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
834                    MVT::v2i32}) {
835       for (MVT VT : MVT::integer_vector_valuetypes()) {
836         setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
837         setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
838         setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
839       }
840     }
841   }
842 
843   if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
844     setTargetDAGCombine(ISD::BUILD_VECTOR);
845     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
846     setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
847   }
848 
849   if (!Subtarget->hasFP64()) {
850     // When targeting a floating-point unit with only single-precision
851     // operations, f64 is legal for the few double-precision instructions which
    // are present. However, no double-precision operations other than moves,
853     // loads and stores are provided by the hardware.
854     setOperationAction(ISD::FADD,       MVT::f64, Expand);
855     setOperationAction(ISD::FSUB,       MVT::f64, Expand);
856     setOperationAction(ISD::FMUL,       MVT::f64, Expand);
857     setOperationAction(ISD::FMA,        MVT::f64, Expand);
858     setOperationAction(ISD::FDIV,       MVT::f64, Expand);
859     setOperationAction(ISD::FREM,       MVT::f64, Expand);
860     setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
861     setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
862     setOperationAction(ISD::FNEG,       MVT::f64, Expand);
863     setOperationAction(ISD::FABS,       MVT::f64, Expand);
864     setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
865     setOperationAction(ISD::FSIN,       MVT::f64, Expand);
866     setOperationAction(ISD::FCOS,       MVT::f64, Expand);
867     setOperationAction(ISD::FPOW,       MVT::f64, Expand);
868     setOperationAction(ISD::FLOG,       MVT::f64, Expand);
869     setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
870     setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
871     setOperationAction(ISD::FEXP,       MVT::f64, Expand);
872     setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
873     setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
874     setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
875     setOperationAction(ISD::FRINT,      MVT::f64, Expand);
876     setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
877     setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
878     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
879     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
880     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
881     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
882     setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
883     setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
884     setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
885   }
886 
887   if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()){
888     setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
889     setOperationAction(ISD::FP_ROUND,  MVT::f16, Custom);
890   }
891 
892   if (!Subtarget->hasFP16())
893     setOperationAction(ISD::FP_EXTEND,  MVT::f32, Custom);
894 
895   if (!Subtarget->hasFP64())
896     setOperationAction(ISD::FP_ROUND,  MVT::f32, Custom);
897 
898   computeRegisterProperties(Subtarget->getRegisterInfo());
899 
900   // ARM does not have floating-point extending loads.
901   for (MVT VT : MVT::fp_valuetypes()) {
902     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
903     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
904   }
905 
906   // ... or truncating stores
907   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
908   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
909   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
910 
911   // ARM does not have i1 sign extending load.
912   for (MVT VT : MVT::integer_valuetypes())
913     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
914 
915   // ARM supports all 4 flavors of integer indexed load / store.
916   if (!Subtarget->isThumb1Only()) {
917     for (unsigned im = (unsigned)ISD::PRE_INC;
918          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
919       setIndexedLoadAction(im,  MVT::i1,  Legal);
920       setIndexedLoadAction(im,  MVT::i8,  Legal);
921       setIndexedLoadAction(im,  MVT::i16, Legal);
922       setIndexedLoadAction(im,  MVT::i32, Legal);
923       setIndexedStoreAction(im, MVT::i1,  Legal);
924       setIndexedStoreAction(im, MVT::i8,  Legal);
925       setIndexedStoreAction(im, MVT::i16, Legal);
926       setIndexedStoreAction(im, MVT::i32, Legal);
927     }
928   } else {
929     // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
930     setIndexedLoadAction(ISD::POST_INC, MVT::i32,  Legal);
931     setIndexedStoreAction(ISD::POST_INC, MVT::i32,  Legal);
932   }
933 
934   setOperationAction(ISD::SADDO, MVT::i32, Custom);
935   setOperationAction(ISD::UADDO, MVT::i32, Custom);
936   setOperationAction(ISD::SSUBO, MVT::i32, Custom);
937   setOperationAction(ISD::USUBO, MVT::i32, Custom);
938 
939   setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
940   setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
941 
942   // i64 operation support.
943   setOperationAction(ISD::MUL,     MVT::i64, Expand);
944   setOperationAction(ISD::MULHU,   MVT::i32, Expand);
945   if (Subtarget->isThumb1Only()) {
946     setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
947     setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
948   }
949   if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
950       || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
951     setOperationAction(ISD::MULHS, MVT::i32, Expand);
952 
953   setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
954   setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
955   setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
956   setOperationAction(ISD::SRL,       MVT::i64, Custom);
957   setOperationAction(ISD::SRA,       MVT::i64, Custom);
958   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
959 
960   // MVE lowers 64 bit shifts to lsll and lsrl
961   // assuming that ISD::SRL and SRA of i64 are already marked custom
962   if (Subtarget->hasMVEIntegerOps())
963     setOperationAction(ISD::SHL, MVT::i64, Custom);
964 
965   // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1.
966   if (Subtarget->isThumb1Only()) {
967     setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
968     setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
969     setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
970   }
971 
972   if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
973     setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
974 
975   // ARM does not have ROTL.
976   setOperationAction(ISD::ROTL, MVT::i32, Expand);
977   for (MVT VT : MVT::vector_valuetypes()) {
978     setOperationAction(ISD::ROTL, VT, Expand);
979     setOperationAction(ISD::ROTR, VT, Expand);
980   }
981   setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
982   setOperationAction(ISD::CTPOP, MVT::i32, Expand);
983   if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {
984     setOperationAction(ISD::CTLZ, MVT::i32, Expand);
985     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall);
986   }
987 
988   // @llvm.readcyclecounter requires the Performance Monitors extension.
989   // Default to the 0 expansion on unsupported platforms.
990   // FIXME: Technically there are older ARM CPUs that have
991   // implementation-specific ways of obtaining this information.
992   if (Subtarget->hasPerfMon())
993     setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
994 
995   // Only ARMv6 has BSWAP.
996   if (!Subtarget->hasV6Ops())
997     setOperationAction(ISD::BSWAP, MVT::i32, Expand);
998 
999   bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
1000                                         : Subtarget->hasDivideInARMMode();
1001   if (!hasDivide) {
1002     // These are expanded into libcalls if the cpu doesn't have HW divider.
1003     setOperationAction(ISD::SDIV,  MVT::i32, LibCall);
1004     setOperationAction(ISD::UDIV,  MVT::i32, LibCall);
1005   }
1006 
1007   if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
1008     setOperationAction(ISD::SDIV, MVT::i32, Custom);
1009     setOperationAction(ISD::UDIV, MVT::i32, Custom);
1010 
1011     setOperationAction(ISD::SDIV, MVT::i64, Custom);
1012     setOperationAction(ISD::UDIV, MVT::i64, Custom);
1013   }
1014 
1015   setOperationAction(ISD::SREM,  MVT::i32, Expand);
1016   setOperationAction(ISD::UREM,  MVT::i32, Expand);
1017 
1018   // Register based DivRem for AEABI (RTABI 4.2)
1019   if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
1020       Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
1021       Subtarget->isTargetWindows()) {
1022     setOperationAction(ISD::SREM, MVT::i64, Custom);
1023     setOperationAction(ISD::UREM, MVT::i64, Custom);
1024     HasStandaloneRem = false;
1025 
1026     if (Subtarget->isTargetWindows()) {
1027       const struct {
1028         const RTLIB::Libcall Op;
1029         const char * const Name;
1030         const CallingConv::ID CC;
1031       } LibraryCalls[] = {
1032         { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
1033         { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
1034         { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
1035         { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },
1036 
1037         { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
1038         { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
1039         { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
1040         { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
1041       };
1042 
1043       for (const auto &LC : LibraryCalls) {
1044         setLibcallName(LC.Op, LC.Name);
1045         setLibcallCallingConv(LC.Op, LC.CC);
1046       }
1047     } else {
1048       const struct {
1049         const RTLIB::Libcall Op;
1050         const char * const Name;
1051         const CallingConv::ID CC;
1052       } LibraryCalls[] = {
1053         { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1054         { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1055         { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1056         { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },
1057 
1058         { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1059         { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1060         { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1061         { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS },
1062       };
1063 
1064       for (const auto &LC : LibraryCalls) {
1065         setLibcallName(LC.Op, LC.Name);
1066         setLibcallCallingConv(LC.Op, LC.CC);
1067       }
1068     }
1069 
1070     setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
1071     setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
1072     setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
1073     setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
1074   } else {
1075     setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1076     setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1077   }
1078 
1079   if (Subtarget->isTargetWindows() && Subtarget->getTargetTriple().isOSMSVCRT())
1080     for (auto &VT : {MVT::f32, MVT::f64})
1081       setOperationAction(ISD::FPOWI, VT, Custom);
1082 
1083   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
1084   setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
1085   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
1086   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1087 
1088   setOperationAction(ISD::TRAP, MVT::Other, Legal);
1089   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
1090 
1091   // Use the default implementation.
1092   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
1093   setOperationAction(ISD::VAARG,              MVT::Other, Expand);
1094   setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
1095   setOperationAction(ISD::VAEND,              MVT::Other, Expand);
1096   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
1097   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
1098 
1099   if (Subtarget->isTargetWindows())
1100     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
1101   else
1102     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
1103 
1104   // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
1105   // the default expansion.
1106   InsertFencesForAtomic = false;
1107   if (Subtarget->hasAnyDataBarrier() &&
1108       (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
1109     // ATOMIC_FENCE needs custom lowering; the others should have been expanded
1110     // to ldrex/strex loops already.
1111     setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
1112     if (!Subtarget->isThumb() || !Subtarget->isMClass())
1113       setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Custom);
1114 
1115     // On v8, we have particularly efficient implementations of atomic fences
1116     // if they can be combined with nearby atomic loads and stores.
1117     if (!Subtarget->hasAcquireRelease() ||
1118         getTargetMachine().getOptLevel() == 0) {
1119       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
1120       InsertFencesForAtomic = true;
1121     }
1122   } else {
1123     // If there's anything we can use as a barrier, go through custom lowering
1124     // for ATOMIC_FENCE.
1125     // If target has DMB in thumb, Fences can be inserted.
1126     if (Subtarget->hasDataBarrier())
1127       InsertFencesForAtomic = true;
1128 
1129     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
1130                        Subtarget->hasAnyDataBarrier() ? Custom : Expand);
1131 
1132     // Set them all for expansion, which will force libcalls.
1133     setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
1134     setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
1135     setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
1136     setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
1137     setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
1138     setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
1139     setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
1140     setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
1141     setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
1142     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
1143     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
1144     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
1145     // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
1146     // Unordered/Monotonic case.
1147     if (!InsertFencesForAtomic) {
1148       setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1149       setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1150     }
1151   }
1152 
1153   setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);
1154 
1155   // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
1156   if (!Subtarget->hasV6Ops()) {
1157     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1158     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
1159   }
1160   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1161 
1162   if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() &&
1163       !Subtarget->isThumb1Only()) {
1164     // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
1165     // iff target supports vfp2.
1166     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1167     setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
1168   }
1169 
1170   // We want to custom lower some of our intrinsics.
1171   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1172   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
1173   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
1174   setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
1175   if (Subtarget->useSjLjEH())
1176     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
1177 
1178   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
1179   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
1180   setOperationAction(ISD::SETCC,     MVT::f64, Expand);
1181   setOperationAction(ISD::SELECT,    MVT::i32, Custom);
1182   setOperationAction(ISD::SELECT,    MVT::f32, Custom);
1183   setOperationAction(ISD::SELECT,    MVT::f64, Custom);
1184   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1185   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1186   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1187   if (Subtarget->hasFullFP16()) {
1188     setOperationAction(ISD::SETCC,     MVT::f16, Expand);
1189     setOperationAction(ISD::SELECT,    MVT::f16, Custom);
1190     setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
1191   }
1192 
1193   setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom);
1194 
1195   setOperationAction(ISD::BRCOND,    MVT::Other, Custom);
1196   setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
1197   if (Subtarget->hasFullFP16())
1198       setOperationAction(ISD::BR_CC, MVT::f16,   Custom);
1199   setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
1200   setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
1201   setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
1202 
1203   // We don't support sin/cos/fmod/copysign/pow
1204   setOperationAction(ISD::FSIN,      MVT::f64, Expand);
1205   setOperationAction(ISD::FSIN,      MVT::f32, Expand);
1206   setOperationAction(ISD::FCOS,      MVT::f32, Expand);
1207   setOperationAction(ISD::FCOS,      MVT::f64, Expand);
1208   setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
1209   setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
1210   setOperationAction(ISD::FREM,      MVT::f64, Expand);
1211   setOperationAction(ISD::FREM,      MVT::f32, Expand);
1212   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&
1213       !Subtarget->isThumb1Only()) {
1214     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
1215     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
1216   }
1217   setOperationAction(ISD::FPOW,      MVT::f64, Expand);
1218   setOperationAction(ISD::FPOW,      MVT::f32, Expand);
1219 
1220   if (!Subtarget->hasVFP4Base()) {
1221     setOperationAction(ISD::FMA, MVT::f64, Expand);
1222     setOperationAction(ISD::FMA, MVT::f32, Expand);
1223   }
1224 
1225   // Various VFP goodness
1226   if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
1227     // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
1228     if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) {
1229       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1230       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1231     }
1232 
1233     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
1234     if (!Subtarget->hasFP16()) {
1235       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1236       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1237     }
1238   }
1239 
1240   // Use __sincos_stret if available.
1241   if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1242       getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1243     setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1244     setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1245   }
1246 
1247   // FP-ARMv8 implements a lot of rounding-like FP operations.
1248   if (Subtarget->hasFPARMv8Base()) {
1249     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1250     setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1251     setOperationAction(ISD::FROUND, MVT::f32, Legal);
1252     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1253     setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1254     setOperationAction(ISD::FRINT, MVT::f32, Legal);
1255     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1256     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1257     setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
1258     setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
1259     setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
1260     setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
1261 
1262     if (Subtarget->hasFP64()) {
1263       setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1264       setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1265       setOperationAction(ISD::FROUND, MVT::f64, Legal);
1266       setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1267       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1268       setOperationAction(ISD::FRINT, MVT::f64, Legal);
1269       setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1270       setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1271     }
1272   }
1273 
1274   // FP16 often need to be promoted to call lib functions
1275   if (Subtarget->hasFullFP16()) {
1276     setOperationAction(ISD::FREM, MVT::f16, Promote);
1277     setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
1278     setOperationAction(ISD::FSIN, MVT::f16, Promote);
1279     setOperationAction(ISD::FCOS, MVT::f16, Promote);
1280     setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
1281     setOperationAction(ISD::FPOWI, MVT::f16, Promote);
1282     setOperationAction(ISD::FPOW, MVT::f16, Promote);
1283     setOperationAction(ISD::FEXP, MVT::f16, Promote);
1284     setOperationAction(ISD::FEXP2, MVT::f16, Promote);
1285     setOperationAction(ISD::FLOG, MVT::f16, Promote);
1286     setOperationAction(ISD::FLOG10, MVT::f16, Promote);
1287     setOperationAction(ISD::FLOG2, MVT::f16, Promote);
1288 
1289     setOperationAction(ISD::FROUND, MVT::f16, Legal);
1290   }
1291 
1292   if (Subtarget->hasNEON()) {
1293     // vmin and vmax aren't available in a scalar form, so we use
1294     // a NEON instruction with an undef lane instead.
1295     setOperationAction(ISD::FMINIMUM, MVT::f16, Legal);
1296     setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal);
1297     setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);
1298     setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
1299     setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal);
1300     setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal);
1301     setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);
1302     setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
1303 
1304     if (Subtarget->hasFullFP16()) {
1305       setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal);
1306       setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal);
1307       setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal);
1308       setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal);
1309 
1310       setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal);
1311       setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal);
1312       setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
1313       setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
1314     }
1315   }
1316 
1317   // We have target-specific dag combine patterns for the following nodes:
1318   // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
1319   setTargetDAGCombine(ISD::ADD);
1320   setTargetDAGCombine(ISD::SUB);
1321   setTargetDAGCombine(ISD::MUL);
1322   setTargetDAGCombine(ISD::AND);
1323   setTargetDAGCombine(ISD::OR);
1324   setTargetDAGCombine(ISD::XOR);
1325 
1326   if (Subtarget->hasV6Ops())
1327     setTargetDAGCombine(ISD::SRL);
1328   if (Subtarget->isThumb1Only())
1329     setTargetDAGCombine(ISD::SHL);
1330 
1331   setStackPointerRegisterToSaveRestore(ARM::SP);
1332 
1333   if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
1334       !Subtarget->hasVFP2Base() || Subtarget->hasMinSize())
1335     setSchedulingPreference(Sched::RegPressure);
1336   else
1337     setSchedulingPreference(Sched::Hybrid);
1338 
1339   //// temporary - rewrite interface to use type
1340   MaxStoresPerMemset = 8;
1341   MaxStoresPerMemsetOptSize = 4;
1342   MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
1343   MaxStoresPerMemcpyOptSize = 2;
1344   MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
1345   MaxStoresPerMemmoveOptSize = 2;
1346 
1347   // On ARM arguments smaller than 4 bytes are extended, so all arguments
1348   // are at least 4 bytes aligned.
1349   setMinStackArgumentAlignment(4);
1350 
1351   // Prefer likely predicted branches to selects on out-of-order cores.
1352   PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
1353 
1354   setPrefLoopAlignment(Subtarget->getPrefLoopAlignment());
1355 
1356   setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
1357 
1358   if (Subtarget->isThumb() || Subtarget->isThumb2())
1359     setTargetDAGCombine(ISD::ABS);
1360 }
1361 
/// useSoftFloat - Report whether this function should be lowered with software
/// floating point; simply forwards the subtarget's configuration.
bool ARMTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}
1365 
// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
1376 std::pair<const TargetRegisterClass *, uint8_t>
1377 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
1378                                            MVT VT) const {
1379   const TargetRegisterClass *RRC = nullptr;
1380   uint8_t Cost = 1;
1381   switch (VT.SimpleTy) {
1382   default:
1383     return TargetLowering::findRepresentativeClass(TRI, VT);
1384   // Use DPR as representative register class for all floating point
1385   // and vector types. Since there are 32 SPR registers and 32 DPR registers so
1386   // the cost is 1 for both f32 and f64.
1387   case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
1388   case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
1389     RRC = &ARM::DPRRegClass;
1390     // When NEON is used for SP, only half of the register file is available
1391     // because operations that define both SP and DP results will be constrained
1392     // to the VFP2 class (D0-D15). We currently model this constraint prior to
1393     // coalescing by double-counting the SP regs. See the FIXME above.
1394     if (Subtarget->useNEONForSinglePrecisionFP())
1395       Cost = 2;
1396     break;
1397   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1398   case MVT::v4f32: case MVT::v2f64:
1399     RRC = &ARM::DPRRegClass;
1400     Cost = 2;
1401     break;
1402   case MVT::v4i64:
1403     RRC = &ARM::DPRRegClass;
1404     Cost = 4;
1405     break;
1406   case MVT::v8i64:
1407     RRC = &ARM::DPRRegClass;
1408     Cost = 8;
1409     break;
1410   }
1411   return std::make_pair(RRC, Cost);
1412 }
1413 
/// getTargetNodeName - Return the human-readable name for the given
/// target-specific (ARMISD) DAG node opcode, or null for opcodes without an
/// entry (including ARMISD::FIRST_NUMBER, which is only a range marker).
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((ARMISD::NodeType)Opcode) {
  // FIRST_NUMBER is not a real node; fall through to return nullptr.
  case ARMISD::FIRST_NUMBER:  break;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::INTRET_FLAG:   return "ARMISD::INTRET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMN:           return "ARMISD::CMN";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";

  case ARMISD::CMOV:          return "ARMISD::CMOV";
  case ARMISD::SUBS:          return "ARMISD::SUBS";

  case ARMISD::SSAT:          return "ARMISD::SSAT";
  case ARMISD::USAT:          return "ARMISD::USAT";

  case ARMISD::ASRL:          return "ARMISD::ASRL";
  case ARMISD::LSRL:          return "ARMISD::LSRL";
  case ARMISD::LSLL:          return "ARMISD::LSLL";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::ADDC:          return "ARMISD::ADDC";
  case ARMISD::ADDE:          return "ARMISD::ADDE";
  case ARMISD::SUBC:          return "ARMISD::SUBC";
  case ARMISD::SUBE:          return "ARMISD::SUBE";

  case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";
  case ARMISD::VMOVhr:        return "ARMISD::VMOVhr";
  case ARMISD::VMOVrh:        return "ARMISD::VMOVrh";
  case ARMISD::VMOVSR:        return "ARMISD::VMOVSR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";

  case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";

  case ARMISD::WIN__CHKSTK:   return "ARMISD::WIN__CHKSTK";
  case ARMISD::WIN__DBZCHK:   return "ARMISD::WIN__DBZCHK";

  // Vector compares.
  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
  case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
  case ARMISD::VCGE:          return "ARMISD::VCGE";
  case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
  case ARMISD::VCGT:          return "ARMISD::VCGT";
  case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
  case ARMISD::VTST:          return "ARMISD::VTST";

  // Vector shifts.
  case ARMISD::VSHL:          return "ARMISD::VSHL";
  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
  case ARMISD::VSLI:          return "ARMISD::VSLI";
  case ARMISD::VSRI:          return "ARMISD::VSRI";
  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
  case ARMISD::VMOVFPIMM:     return "ARMISD::VMOVFPIMM";
  case ARMISD::VDUP:          return "ARMISD::VDUP";
  case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
  case ARMISD::VEXT:          return "ARMISD::VEXT";
  case ARMISD::VREV64:        return "ARMISD::VREV64";
  case ARMISD::VREV32:        return "ARMISD::VREV32";
  case ARMISD::VREV16:        return "ARMISD::VREV16";
  case ARMISD::VZIP:          return "ARMISD::VZIP";
  case ARMISD::VUZP:          return "ARMISD::VUZP";
  case ARMISD::VTRN:          return "ARMISD::VTRN";
  case ARMISD::VTBL1:         return "ARMISD::VTBL1";
  case ARMISD::VTBL2:         return "ARMISD::VTBL2";
  case ARMISD::VMULLs:        return "ARMISD::VMULLs";
  case ARMISD::VMULLu:        return "ARMISD::VMULLu";
  // Multiply-accumulate family.
  case ARMISD::UMAAL:         return "ARMISD::UMAAL";
  case ARMISD::UMLAL:         return "ARMISD::UMLAL";
  case ARMISD::SMLAL:         return "ARMISD::SMLAL";
  case ARMISD::SMLALBB:       return "ARMISD::SMLALBB";
  case ARMISD::SMLALBT:       return "ARMISD::SMLALBT";
  case ARMISD::SMLALTB:       return "ARMISD::SMLALTB";
  case ARMISD::SMLALTT:       return "ARMISD::SMLALTT";
  case ARMISD::SMULWB:        return "ARMISD::SMULWB";
  case ARMISD::SMULWT:        return "ARMISD::SMULWT";
  case ARMISD::SMLALD:        return "ARMISD::SMLALD";
  case ARMISD::SMLALDX:       return "ARMISD::SMLALDX";
  case ARMISD::SMLSLD:        return "ARMISD::SMLSLD";
  case ARMISD::SMLSLDX:       return "ARMISD::SMLSLDX";
  case ARMISD::SMMLAR:        return "ARMISD::SMMLAR";
  case ARMISD::SMMLSR:        return "ARMISD::SMMLSR";
  case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
  case ARMISD::BFI:           return "ARMISD::BFI";
  case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
  case ARMISD::VBSL:          return "ARMISD::VBSL";
  case ARMISD::MEMCPY:        return "ARMISD::MEMCPY";
  // NEON structured load/store (with and without address update).
  case ARMISD::VLD1DUP:       return "ARMISD::VLD1DUP";
  case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD1DUP_UPD:   return "ARMISD::VLD1DUP_UPD";
  case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
  case ARMISD::WLS:           return "ARMISD::WLS";
  }
  return nullptr;
}
1574 
1575 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1576                                           EVT VT) const {
1577   if (!VT.isVector())
1578     return getPointerTy(DL);
1579   return VT.changeVectorElementTypeToInteger();
1580 }
1581 
1582 /// getRegClassFor - Return the register class that should be used for the
1583 /// specified value type.
1584 const TargetRegisterClass *
1585 ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
1586   (void)isDivergent;
1587   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
1588   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
1589   // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive
1590   // MVE Q registers.
1591   if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
1592     if (VT == MVT::v4i64)
1593       return &ARM::QQPRRegClass;
1594     if (VT == MVT::v8i64)
1595       return &ARM::QQQQPRRegClass;
1596   }
1597   return TargetLowering::getRegClassFor(VT);
1598 }
1599 
1600 // memcpy, and other memory intrinsics, typically tries to use LDM/STM if the
1601 // source/dest is aligned and the copy size is large enough. We therefore want
1602 // to align such objects passed to memory intrinsics.
1603 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
1604                                                unsigned &PrefAlign) const {
1605   if (!isa<MemIntrinsic>(CI))
1606     return false;
1607   MinSize = 8;
1608   // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
1609   // cycle faster than 4-byte aligned LDM.
1610   PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
1611   return true;
1612 }
1613 
/// createFastISel - Create the ARM-specific FastISel object used for fast
/// instruction selection; simply forwards to the ARM factory function.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}
1620 
1621 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
1622   unsigned NumVals = N->getNumValues();
1623   if (!NumVals)
1624     return Sched::RegPressure;
1625 
1626   for (unsigned i = 0; i != NumVals; ++i) {
1627     EVT VT = N->getValueType(i);
1628     if (VT == MVT::Glue || VT == MVT::Other)
1629       continue;
1630     if (VT.isFloatingPoint() || VT.isVector())
1631       return Sched::ILP;
1632   }
1633 
1634   if (!N->isMachineOpcode())
1635     return Sched::RegPressure;
1636 
1637   // Load are scheduled for latency even if there instruction itinerary
1638   // is not available.
1639   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1640   const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1641 
1642   if (MCID.getNumDefs() == 0)
1643     return Sched::RegPressure;
1644   if (!Itins->isEmpty() &&
1645       Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
1646     return Sched::ILP;
1647 
1648   return Sched::RegPressure;
1649 }
1650 
1651 //===----------------------------------------------------------------------===//
1652 // Lowering Code
1653 //===----------------------------------------------------------------------===//
1654 
1655 static bool isSRL16(const SDValue &Op) {
1656   if (Op.getOpcode() != ISD::SRL)
1657     return false;
1658   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1659     return Const->getZExtValue() == 16;
1660   return false;
1661 }
1662 
1663 static bool isSRA16(const SDValue &Op) {
1664   if (Op.getOpcode() != ISD::SRA)
1665     return false;
1666   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1667     return Const->getZExtValue() == 16;
1668   return false;
1669 }
1670 
1671 static bool isSHL16(const SDValue &Op) {
1672   if (Op.getOpcode() != ISD::SHL)
1673     return false;
1674   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1675     return Const->getZExtValue() == 16;
1676   return false;
1677 }
1678 
1679 // Check for a signed 16-bit value. We special case SRA because it makes it
1680 // more simple when also looking for SRAs that aren't sign extending a
1681 // smaller value. Without the check, we'd need to take extra care with
1682 // checking order for some operations.
1683 static bool isS16(const SDValue &Op, SelectionDAG &DAG) {
1684   if (isSRA16(Op))
1685     return isSHL16(Op.getOperand(0));
1686   return DAG.ComputeNumSignBits(Op) == 17;
1687 }
1688 
/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  // Signed comparisons.
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  // Unsigned comparisons map to the ARM unsigned conditions
  // (HI/HS/LO/LS).
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}
1705 
/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
/// Some FP conditions need two ARM conditions; those set \p CondCode2 to the
/// second condition (it stays ARMCC::AL when only one is needed).
/// \p InvalidOnQNaN appears to indicate whether the compare may raise an FP
/// invalid-operation exception on quiet-NaN inputs — NOTE(review): inferred
/// from the flag's name and which cases clear it; confirm against its users.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2, bool &InvalidOnQNaN) {
  // Defaults: single condition, QNaN-sensitive; cases below override.
  CondCode2 = ARMCC::AL;
  InvalidOnQNaN = true;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ:
    CondCode = ARMCC::EQ;
    InvalidOnQNaN = false;
    break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  // Ordered less-than/less-equal use MI/LS rather than LT/LE so that
  // unordered operands do not satisfy the condition.
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  // Ordered-not-equal needs two checks: less than (MI) or greater than (GT).
  case ISD::SETONE:
    CondCode = ARMCC::MI;
    CondCode2 = ARMCC::GT;
    InvalidOnQNaN = false;
    break;
  // VC/VS test the overflow flag, which the FP compare sets for unordered.
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  // Unordered-or-equal: equal (EQ) or unordered (VS).
  case ISD::SETUEQ:
    CondCode = ARMCC::EQ;
    CondCode2 = ARMCC::VS;
    InvalidOnQNaN = false;
    break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE:
    CondCode = ARMCC::NE;
    InvalidOnQNaN = false;
    break;
  }
}
1749 
1750 //===----------------------------------------------------------------------===//
1751 //                      Calling Convention Implementation
1752 //===----------------------------------------------------------------------===//
1753 
1754 /// getEffectiveCallingConv - Get the effective calling convention, taking into
1755 /// account presence of floating point hardware and calling convention
1756 /// limitations, such as support for variadic functions.
1757 CallingConv::ID
1758 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
1759                                            bool isVarArg) const {
1760   switch (CC) {
1761   default:
1762     report_fatal_error("Unsupported calling convention");
1763   case CallingConv::ARM_AAPCS:
1764   case CallingConv::ARM_APCS:
1765   case CallingConv::GHC:
1766     return CC;
1767   case CallingConv::PreserveMost:
1768     return CallingConv::PreserveMost;
1769   case CallingConv::ARM_AAPCS_VFP:
1770   case CallingConv::Swift:
1771     return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
1772   case CallingConv::C:
1773     if (!Subtarget->isAAPCS_ABI())
1774       return CallingConv::ARM_APCS;
1775     else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() &&
1776              getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
1777              !isVarArg)
1778       return CallingConv::ARM_AAPCS_VFP;
1779     else
1780       return CallingConv::ARM_AAPCS;
1781   case CallingConv::Fast:
1782   case CallingConv::CXX_FAST_TLS:
1783     if (!Subtarget->isAAPCS_ABI()) {
1784       if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg)
1785         return CallingConv::Fast;
1786       return CallingConv::ARM_APCS;
1787     } else if (Subtarget->hasVFP2Base() &&
1788                !Subtarget->isThumb1Only() && !isVarArg)
1789       return CallingConv::ARM_AAPCS_VFP;
1790     else
1791       return CallingConv::ARM_AAPCS;
1792   }
1793 }
1794 
/// CCAssignFnForCall - Return the CCAssignFn used to assign locations to the
/// arguments of a call with the given calling convention.
CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                 bool isVarArg) const {
  return CCAssignFnForNode(CC, false, isVarArg);
}
1799 
/// CCAssignFnForReturn - Return the CCAssignFn used to assign locations to the
/// return values of a call with the given calling convention.
CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                   bool isVarArg) const {
  return CCAssignFnForNode(CC, true, isVarArg);
}
1804 
1805 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
1806 /// CallingConvention.
1807 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
1808                                                  bool Return,
1809                                                  bool isVarArg) const {
1810   switch (getEffectiveCallingConv(CC, isVarArg)) {
1811   default:
1812     report_fatal_error("Unsupported calling convention");
1813   case CallingConv::ARM_APCS:
1814     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1815   case CallingConv::ARM_AAPCS:
1816     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1817   case CallingConv::ARM_AAPCS_VFP:
1818     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1819   case CallingConv::Fast:
1820     return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1821   case CallingConv::GHC:
1822     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
1823   case CallingConv::PreserveMost:
1824     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1825   }
1826 }
1827 
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue ARMTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
    SDValue ThisVal) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    // Deliberately a copy: VA is reassigned below when one value spans
    // several locations (the ++i increments skip those extra RVLocs).
    CCValAssign VA = RVLocs[i];

    // Pass 'this' value directly from the argument to return value, to avoid
    // reg unit interference
    if (i == 0 && isThisReturn) {
      assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);
      continue;
    }

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      // The f64 arrives in two i32 registers. Each CopyFromReg threads both
      // the chain and the glue (InFlag) so the copies remain adjacent.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      // Big-endian targets receive the halves in the opposite order.
      if (!Subtarget->isLittle())
        std::swap (Lo, Hi);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        // A v2f64 spans four i32 locations: insert the first rebuilt f64 into
        // an undef vector, then repeat the two-register copy for element 1.
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, dl, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        if (!Subtarget->isLittle())
          std::swap (Lo, Hi);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, dl, MVT::i32));
      }
    } else {
      // Common case: the whole value lives in a single register.
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    // Undo any promotion the calling convention applied to the location type.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
1909 
1910 /// LowerMemOpCallTo - Store the argument to the stack.
1911 SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
1912                                             SDValue Arg, const SDLoc &dl,
1913                                             SelectionDAG &DAG,
1914                                             const CCValAssign &VA,
1915                                             ISD::ArgFlagsTy Flags) const {
1916   unsigned LocMemOffset = VA.getLocMemOffset();
1917   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
1918   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
1919                        StackPtr, PtrOff);
1920   return DAG.getStore(
1921       Chain, dl, Arg, PtrOff,
1922       MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
1923 }
1924 
/// PassF64ArgInRegs - Split the f64 in \p Arg into two i32 halves with
/// VMOVRRD and route each half to its assigned location: the first half goes
/// to \p VA's register; the second goes to \p NextVA's register (appended to
/// \p RegsToPass) or, if \p NextVA is a stack location, to a store appended
/// to \p MemOpChains. \p StackPtr is materialized lazily on first use.
void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVectorImpl<SDValue> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {
  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  // Endianness picks which VMOVRRD result goes to the first location:
  // result 0 on little-endian, result 1 on big-endian.
  unsigned id = Subtarget->isLittle() ? 0 : 1;
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
  else {
    assert(NextVA.isMemLoc());
    // Create the SP copy only when a stack store is actually needed, so
    // callers that never spill avoid the extra node.
    if (!StackPtr.getNode())
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
                                    getPointerTy(DAG.getDataLayout()));

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}
1950 
/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
1954 SDValue
1955 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1956                              SmallVectorImpl<SDValue> &InVals) const {
1957   SelectionDAG &DAG                     = CLI.DAG;
1958   SDLoc &dl                             = CLI.DL;
1959   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1960   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
1961   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
1962   SDValue Chain                         = CLI.Chain;
1963   SDValue Callee                        = CLI.Callee;
1964   bool &isTailCall                      = CLI.IsTailCall;
1965   CallingConv::ID CallConv              = CLI.CallConv;
1966   bool doesNotRet                       = CLI.DoesNotReturn;
1967   bool isVarArg                         = CLI.IsVarArg;
1968 
1969   MachineFunction &MF = DAG.getMachineFunction();
1970   bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
1971   bool isThisReturn = false;
1972   auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
1973   bool PreferIndirect = false;
1974 
1975   // Disable tail calls if they're not supported.
1976   if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
1977     isTailCall = false;
1978 
1979   if (isa<GlobalAddressSDNode>(Callee)) {
1980     // If we're optimizing for minimum size and the function is called three or
1981     // more times in this block, we can improve codesize by calling indirectly
1982     // as BLXr has a 16-bit encoding.
1983     auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
1984     if (CLI.CS) {
1985       auto *BB = CLI.CS.getParent();
1986       PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
1987                        count_if(GV->users(), [&BB](const User *U) {
1988                          return isa<Instruction>(U) &&
1989                                 cast<Instruction>(U)->getParent() == BB;
1990                        }) > 2;
1991     }
1992   }
1993   if (isTailCall) {
1994     // Check if it's really possible to do a tail call.
1995     isTailCall = IsEligibleForTailCallOptimization(
1996         Callee, CallConv, isVarArg, isStructRet,
1997         MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
1998         PreferIndirect);
1999     if (!isTailCall && CLI.CS && CLI.CS.isMustTailCall())
2000       report_fatal_error("failed to perform tail call elimination on a call "
2001                          "site marked musttail");
2002     // We don't support GuaranteedTailCallOpt for ARM, only automatically
2003     // detected sibcalls.
2004     if (isTailCall)
2005       ++NumTailCalls;
2006   }
2007 
2008   // Analyze operands of the call, assigning locations to each operand.
2009   SmallVector<CCValAssign, 16> ArgLocs;
2010   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2011                  *DAG.getContext());
2012   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
2013 
2014   // Get a count of how many bytes are to be pushed on the stack.
2015   unsigned NumBytes = CCInfo.getNextStackOffset();
2016 
2017   if (isTailCall) {
2018     // For tail calls, memory operands are available in our caller's stack.
2019     NumBytes = 0;
2020   } else {
2021     // Adjust the stack pointer for the new arguments...
2022     // These operations are automatically eliminated by the prolog/epilog pass
2023     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
2024   }
2025 
2026   SDValue StackPtr =
2027       DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));
2028 
2029   RegsToPassVector RegsToPass;
2030   SmallVector<SDValue, 8> MemOpChains;
2031 
2032   // Walk the register/memloc assignments, inserting copies/loads.  In the case
2033   // of tail call optimization, arguments are handled later.
2034   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2035        i != e;
2036        ++i, ++realArgIdx) {
2037     CCValAssign &VA = ArgLocs[i];
2038     SDValue Arg = OutVals[realArgIdx];
2039     ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2040     bool isByVal = Flags.isByVal();
2041 
2042     // Promote the value if needed.
2043     switch (VA.getLocInfo()) {
2044     default: llvm_unreachable("Unknown loc info!");
2045     case CCValAssign::Full: break;
2046     case CCValAssign::SExt:
2047       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
2048       break;
2049     case CCValAssign::ZExt:
2050       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
2051       break;
2052     case CCValAssign::AExt:
2053       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
2054       break;
2055     case CCValAssign::BCvt:
2056       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2057       break;
2058     }
2059 
2060     // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
2061     if (VA.needsCustom()) {
2062       if (VA.getLocVT() == MVT::v2f64) {
2063         SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2064                                   DAG.getConstant(0, dl, MVT::i32));
2065         SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2066                                   DAG.getConstant(1, dl, MVT::i32));
2067 
2068         PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
2069                          VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
2070 
2071         VA = ArgLocs[++i]; // skip ahead to next loc
2072         if (VA.isRegLoc()) {
2073           PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
2074                            VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
2075         } else {
2076           assert(VA.isMemLoc());
2077 
2078           MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
2079                                                  dl, DAG, VA, Flags));
2080         }
2081       } else {
2082         PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
2083                          StackPtr, MemOpChains, Flags);
2084       }
2085     } else if (VA.isRegLoc()) {
2086       if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
2087           Outs[0].VT == MVT::i32) {
2088         assert(VA.getLocVT() == MVT::i32 &&
2089                "unexpected calling convention register assignment");
2090         assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
2091                "unexpected use of 'returned'");
2092         isThisReturn = true;
2093       }
2094       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2095     } else if (isByVal) {
2096       assert(VA.isMemLoc());
2097       unsigned offset = 0;
2098 
2099       // True if this byval aggregate will be split between registers
2100       // and memory.
2101       unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
2102       unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
2103 
2104       if (CurByValIdx < ByValArgsCount) {
2105 
2106         unsigned RegBegin, RegEnd;
2107         CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
2108 
2109         EVT PtrVT =
2110             DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2111         unsigned int i, j;
2112         for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
2113           SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
2114           SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
2115           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
2116                                      MachinePointerInfo(),
2117                                      DAG.InferPtrAlignment(AddArg));
2118           MemOpChains.push_back(Load.getValue(1));
2119           RegsToPass.push_back(std::make_pair(j, Load));
2120         }
2121 
2122         // If parameter size outsides register area, "offset" value
2123         // helps us to calculate stack slot for remained part properly.
2124         offset = RegEnd - RegBegin;
2125 
2126         CCInfo.nextInRegsParam();
2127       }
2128 
2129       if (Flags.getByValSize() > 4*offset) {
2130         auto PtrVT = getPointerTy(DAG.getDataLayout());
2131         unsigned LocMemOffset = VA.getLocMemOffset();
2132         SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
2133         SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
2134         SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
2135         SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
2136         SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
2137                                            MVT::i32);
2138         SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
2139                                             MVT::i32);
2140 
2141         SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
2142         SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
2143         MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
2144                                           Ops));
2145       }
2146     } else if (!isTailCall) {
2147       assert(VA.isMemLoc());
2148 
2149       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2150                                              dl, DAG, VA, Flags));
2151     }
2152   }
2153 
2154   if (!MemOpChains.empty())
2155     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2156 
2157   // Build a sequence of copy-to-reg nodes chained together with token chain
2158   // and flag operands which copy the outgoing args into the appropriate regs.
2159   SDValue InFlag;
2160   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2161     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2162                              RegsToPass[i].second, InFlag);
2163     InFlag = Chain.getValue(1);
2164   }
2165 
2166   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
2167   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
2168   // node so that legalize doesn't hack it.
2169   bool isDirect = false;
2170 
2171   const TargetMachine &TM = getTargetMachine();
2172   const Module *Mod = MF.getFunction().getParent();
2173   const GlobalValue *GV = nullptr;
2174   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2175     GV = G->getGlobal();
2176   bool isStub =
2177       !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();
2178 
2179   bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
2180   bool isLocalARMFunc = false;
2181   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2182   auto PtrVt = getPointerTy(DAG.getDataLayout());
2183 
2184   if (Subtarget->genLongCalls()) {
2185     assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
2186            "long-calls codegen is not position independent!");
2187     // Handle a global address or an external symbol. If it's not one of
2188     // those, the target's already in a register, so we don't need to do
2189     // anything extra.
2190     if (isa<GlobalAddressSDNode>(Callee)) {
2191       // Create a constant pool entry for the callee address
2192       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2193       ARMConstantPoolValue *CPV =
2194         ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
2195 
2196       // Get the address of the callee into a register
2197       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2198       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2199       Callee = DAG.getLoad(
2200           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2201           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2202     } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
2203       const char *Sym = S->getSymbol();
2204 
2205       // Create a constant pool entry for the callee address
2206       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2207       ARMConstantPoolValue *CPV =
2208         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2209                                       ARMPCLabelIndex, 0);
2210       // Get the address of the callee into a register
2211       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2212       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2213       Callee = DAG.getLoad(
2214           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2215           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2216     }
2217   } else if (isa<GlobalAddressSDNode>(Callee)) {
2218     if (!PreferIndirect) {
2219       isDirect = true;
2220       bool isDef = GV->isStrongDefinitionForLinker();
2221 
2222       // ARM call to a local ARM function is predicable.
2223       isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
2224       // tBX takes a register source operand.
2225       if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2226         assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
2227         Callee = DAG.getNode(
2228             ARMISD::WrapperPIC, dl, PtrVt,
2229             DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
2230         Callee = DAG.getLoad(
2231             PtrVt, dl, DAG.getEntryNode(), Callee,
2232             MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2233             /* Alignment = */ 0, MachineMemOperand::MODereferenceable |
2234                                      MachineMemOperand::MOInvariant);
2235       } else if (Subtarget->isTargetCOFF()) {
2236         assert(Subtarget->isTargetWindows() &&
2237                "Windows is the only supported COFF target");
2238         unsigned TargetFlags = GV->hasDLLImportStorageClass()
2239                                    ? ARMII::MO_DLLIMPORT
2240                                    : ARMII::MO_NO_FLAG;
2241         Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0,
2242                                             TargetFlags);
2243         if (GV->hasDLLImportStorageClass())
2244           Callee =
2245               DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
2246                           DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
2247                           MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2248       } else {
2249         Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
2250       }
2251     }
2252   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2253     isDirect = true;
2254     // tBX takes a register source operand.
2255     const char *Sym = S->getSymbol();
2256     if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2257       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2258       ARMConstantPoolValue *CPV =
2259         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2260                                       ARMPCLabelIndex, 4);
2261       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2262       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2263       Callee = DAG.getLoad(
2264           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2265           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2266       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2267       Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
2268     } else {
2269       Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
2270     }
2271   }
2272 
2273   // FIXME: handle tail calls differently.
2274   unsigned CallOpc;
2275   if (Subtarget->isThumb()) {
2276     if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
2277       CallOpc = ARMISD::CALL_NOLINK;
2278     else
2279       CallOpc = ARMISD::CALL;
2280   } else {
2281     if (!isDirect && !Subtarget->hasV5TOps())
2282       CallOpc = ARMISD::CALL_NOLINK;
2283     else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
2284              // Emit regular call when code size is the priority
2285              !Subtarget->hasMinSize())
2286       // "mov lr, pc; b _foo" to avoid confusing the RSP
2287       CallOpc = ARMISD::CALL_NOLINK;
2288     else
2289       CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
2290   }
2291 
2292   std::vector<SDValue> Ops;
2293   Ops.push_back(Chain);
2294   Ops.push_back(Callee);
2295 
2296   // Add argument registers to the end of the list so that they are known live
2297   // into the call.
2298   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2299     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2300                                   RegsToPass[i].second.getValueType()));
2301 
2302   // Add a register mask operand representing the call-preserved registers.
2303   if (!isTailCall) {
2304     const uint32_t *Mask;
2305     const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
2306     if (isThisReturn) {
2307       // For 'this' returns, use the R0-preserving mask if applicable
2308       Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
2309       if (!Mask) {
2310         // Set isThisReturn to false if the calling convention is not one that
2311         // allows 'returned' to be modeled in this way, so LowerCallResult does
2312         // not try to pass 'this' straight through
2313         isThisReturn = false;
2314         Mask = ARI->getCallPreservedMask(MF, CallConv);
2315       }
2316     } else
2317       Mask = ARI->getCallPreservedMask(MF, CallConv);
2318 
2319     assert(Mask && "Missing call preserved mask for calling convention");
2320     Ops.push_back(DAG.getRegisterMask(Mask));
2321   }
2322 
2323   if (InFlag.getNode())
2324     Ops.push_back(InFlag);
2325 
2326   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2327   if (isTailCall) {
2328     MF.getFrameInfo().setHasTailCall();
2329     return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
2330   }
2331 
2332   // Returns a chain and a flag for retval copy to use.
2333   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
2334   InFlag = Chain.getValue(1);
2335 
2336   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
2337                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
2338   if (!Ins.empty())
2339     InFlag = Chain.getValue(1);
2340 
2341   // Handle result values, copying them out of physregs into vregs that we
2342   // return.
2343   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2344                          InVals, isThisReturn,
2345                          isThisReturn ? OutVals[0] : SDValue());
2346 }
2347 
/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.  Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to insure
/// this.
///
/// \param State  calling-convention state; its register allocations are
///               advanced as a side effect.
/// \param Size   in: byval size in bytes; out: the portion (possibly zero)
///               that must still be passed in memory.
/// \param Align  requested byval alignment in bytes.
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    unsigned Align) const {
  // Byval (as with any stack) slots are always at least 4 byte aligned.
  Align = std::max(Align, 4U);

  // Claim the first free GPR argument register. If none is available, the
  // whole aggregate goes on the stack and there is nothing to record here.
  unsigned Reg = State->AllocateReg(GPRArgRegs);
  if (!Reg)
    return;

  // Skip (and thereby waste) registers until the candidate register is
  // compatible with the byval's alignment, measured in 4-byte register units.
  unsigned AlignInRegs = Align / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)
    Reg = State->AllocateReg(GPRArgRegs);

  // Alignment padding may have consumed the remaining argument registers.
  if (!Reg)
    return;

  // Number of bytes that still fit in the register range [Reg, r4).
  unsigned Excess = 4 * (ARM::R4 - Reg);

  // Special case when NSAA != SP and parameter size greater than size of
  // all remained GPR regs. In that case we can't split parameter, we must
  // send it to stack. We also must set NCRN to R4, so waste all
  // remained registers.
  const unsigned NSAAOffset = State->getNextStackOffset();
  if (NSAAOffset != 0 && Size > Excess) {
    while (State->AllocateReg(GPRArgRegs))
      ;
    return;
  }

  // First register for byval parameter is the first register that wasn't
  // allocated before this method call, so it would be "reg".
  // If parameter is small enough to be saved in range [reg, r4), then
  // the end (first after last) register would be reg + param-size-in-regs,
  // else parameter would be splitted between registers and stack,
  // end register would be r4 in this case.
  unsigned ByValRegBegin = Reg;
  unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
  State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
  // Note, first register is allocated in the beginning of function already,
  // allocate remained amount of registers we need.
  for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
    State->AllocateReg(GPRArgRegs);
  // A byval parameter that is split between registers and memory needs its
  // size truncated here.
  // In the case where the entire structure fits in registers, we set the
  // size in memory to zero.
  Size = std::max<int>(Size - Excess, 0);
}
2401 
2402 /// MatchingStackOffset - Return true if the given stack call argument is
2403 /// already available in the same position (relatively) of the caller's
2404 /// incoming argument stack.
2405 static
2406 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2407                          MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
2408                          const TargetInstrInfo *TII) {
2409   unsigned Bytes = Arg.getValueSizeInBits() / 8;
2410   int FI = std::numeric_limits<int>::max();
2411   if (Arg.getOpcode() == ISD::CopyFromReg) {
2412     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2413     if (!TargetRegisterInfo::isVirtualRegister(VR))
2414       return false;
2415     MachineInstr *Def = MRI->getVRegDef(VR);
2416     if (!Def)
2417       return false;
2418     if (!Flags.isByVal()) {
2419       if (!TII->isLoadFromStackSlot(*Def, FI))
2420         return false;
2421     } else {
2422       return false;
2423     }
2424   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2425     if (Flags.isByVal())
2426       // ByVal argument is passed in as a pointer but it's now being
2427       // dereferenced. e.g.
2428       // define @foo(%struct.X* %A) {
2429       //   tail call @bar(%struct.X* byval %A)
2430       // }
2431       return false;
2432     SDValue Ptr = Ld->getBasePtr();
2433     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
2434     if (!FINode)
2435       return false;
2436     FI = FINode->getIndex();
2437   } else
2438     return false;
2439 
2440   assert(FI != std::numeric_limits<int>::max());
2441   if (!MFI.isFixedObjectIndex(FI))
2442     return false;
2443   return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
2444 }
2445 
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
///
/// Returns true only for "obvious" sibcall-style cases that need no ABI
/// changes: compatible calling conventions, no struct-return, no split
/// byval/vararg save area in the caller, and outgoing stack arguments that
/// already sit in the caller's matching incoming slots.
bool ARMTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
    bool isCalleeStructRet, bool isCallerStructRet,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
    const bool isIndirect) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // Callers are expected to query this only on subtargets with tail-call
  // support.
  assert(Subtarget->supportsTailCall());

  // Indirect tail calls cannot be optimized for Thumb1 if the args
  // to the call take up r0-r3. The reason is that there are no legal registers
  // left to hold the pointer to the function to be called.
  if (Subtarget->isThumb1Only() && Outs.size() >= 4 &&
      (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect))
    return false;

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Exception-handling functions need a special set of instructions to indicate
  // a return to the hardware. Tail-calling another function would probably
  // break this.
  if (CallerF.hasFnAttribute("interrupt"))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // Externally-defined functions with weak linkage should not be
  // tail-called on ARM when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    const Triple &TT = getTargetMachine().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
      return false;
  }

  // Check that the call results are passed in the same way.
  LLVMContext &C = *DAG.getContext();
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
                                  CCAssignFnForReturn(CalleeCC, isVarArg),
                                  CCAssignFnForReturn(CallerCC, isVarArg)))
    return false;
  // The callee has to preserve all registers the caller needs to preserve.
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // If Caller's vararg or byval argument has been split between registers and
  // stack, do not perform tail call, since part of the argument is in caller's
  // local frame.
  const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
  if (AFI_Caller->getArgRegsSaveSize())
    return false;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
    if (CCInfo.getNextStackOffset()) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const TargetInstrInfo *TII = Subtarget->getInstrInfo();
      // Note: one outgoing value (realArgIdx) may occupy several assigned
      // locations (i); custom-lowered values consume extra ArgLocs entries
      // via the ++i skips below.
      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {
        CCValAssign &VA = ArgLocs[i];
        EVT RegVT = VA.getLocVT();
        SDValue Arg = OutVals[realArgIdx];
        ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (VA.needsCustom()) {
          // f64 and vector types are split into multiple registers or
          // register/stack-slot combinations.  The types will not match
          // the registers; give up on memory f64 refs until we figure
          // out what to do about this.
          if (!VA.isRegLoc())
            return false;
          if (!ArgLocs[++i].isRegLoc())
            return false;
          if (RegVT == MVT::v2f64) {
            // v2f64 spans two more i32 locations, all of which must be
            // registers.
            if (!ArgLocs[++i].isRegLoc())
              return false;
            if (!ArgLocs[++i].isRegLoc())
              return false;
          }
        } else if (!VA.isRegLoc()) {
          // A plain stack argument is only acceptable if it already lives
          // in the caller's matching incoming stack slot.
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
      return false;
  }

  return true;
}
2573 
2574 bool
2575 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2576                                   MachineFunction &MF, bool isVarArg,
2577                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
2578                                   LLVMContext &Context) const {
2579   SmallVector<CCValAssign, 16> RVLocs;
2580   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2581   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2582 }
2583 
2584 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
2585                                     const SDLoc &DL, SelectionDAG &DAG) {
2586   const MachineFunction &MF = DAG.getMachineFunction();
2587   const Function &F = MF.getFunction();
2588 
2589   StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();
2590 
2591   // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
2592   // version of the "preferred return address". These offsets affect the return
2593   // instruction if this is a return from PL1 without hypervisor extensions.
2594   //    IRQ/FIQ: +4     "subs pc, lr, #4"
2595   //    SWI:     0      "subs pc, lr, #0"
2596   //    ABORT:   +4     "subs pc, lr, #4"
2597   //    UNDEF:   +4/+2  "subs pc, lr, #0"
2598   // UNDEF varies depending on where the exception came from ARM or Thumb
2599   // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
2600 
2601   int64_t LROffset;
2602   if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
2603       IntKind == "ABORT")
2604     LROffset = 4;
2605   else if (IntKind == "SWI" || IntKind == "UNDEF")
2606     LROffset = 0;
2607   else
2608     report_fatal_error("Unsupported interrupt attribute. If present, value "
2609                        "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2610 
2611   RetOps.insert(RetOps.begin() + 1,
2612                 DAG.getConstant(LROffset, DL, MVT::i32, false));
2613 
2614   return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
2615 }
2616 
2617 SDValue
2618 ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2619                                bool isVarArg,
2620                                const SmallVectorImpl<ISD::OutputArg> &Outs,
2621                                const SmallVectorImpl<SDValue> &OutVals,
2622                                const SDLoc &dl, SelectionDAG &DAG) const {
2623   // CCValAssign - represent the assignment of the return value to a location.
2624   SmallVector<CCValAssign, 16> RVLocs;
2625 
2626   // CCState - Info about the registers and stack slots.
2627   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2628                  *DAG.getContext());
2629 
2630   // Analyze outgoing return values.
2631   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2632 
2633   SDValue Flag;
2634   SmallVector<SDValue, 4> RetOps;
2635   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2636   bool isLittleEndian = Subtarget->isLittle();
2637 
2638   MachineFunction &MF = DAG.getMachineFunction();
2639   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2640   AFI->setReturnRegsCount(RVLocs.size());
2641 
2642   // Copy the result values into the output registers.
2643   for (unsigned i = 0, realRVLocIdx = 0;
2644        i != RVLocs.size();
2645        ++i, ++realRVLocIdx) {
2646     CCValAssign &VA = RVLocs[i];
2647     assert(VA.isRegLoc() && "Can only return in registers!");
2648 
2649     SDValue Arg = OutVals[realRVLocIdx];
2650     bool ReturnF16 = false;
2651 
2652     if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) {
2653       // Half-precision return values can be returned like this:
2654       //
2655       // t11 f16 = fadd ...
2656       // t12: i16 = bitcast t11
2657       //   t13: i32 = zero_extend t12
2658       // t14: f32 = bitcast t13  <~~~~~~~ Arg
2659       //
2660       // to avoid code generation for bitcasts, we simply set Arg to the node
2661       // that produces the f16 value, t11 in this case.
2662       //
2663       if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) {
2664         SDValue ZE = Arg.getOperand(0);
2665         if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) {
2666           SDValue BC = ZE.getOperand(0);
2667           if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) {
2668             Arg = BC.getOperand(0);
2669             ReturnF16 = true;
2670           }
2671         }
2672       }
2673     }
2674 
2675     switch (VA.getLocInfo()) {
2676     default: llvm_unreachable("Unknown loc info!");
2677     case CCValAssign::Full: break;
2678     case CCValAssign::BCvt:
2679       if (!ReturnF16)
2680         Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2681       break;
2682     }
2683 
2684     if (VA.needsCustom()) {
2685       if (VA.getLocVT() == MVT::v2f64) {
2686         // Extract the first half and return it in two registers.
2687         SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2688                                    DAG.getConstant(0, dl, MVT::i32));
2689         SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
2690                                        DAG.getVTList(MVT::i32, MVT::i32), Half);
2691 
2692         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2693                                  HalfGPRs.getValue(isLittleEndian ? 0 : 1),
2694                                  Flag);
2695         Flag = Chain.getValue(1);
2696         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2697         VA = RVLocs[++i]; // skip ahead to next loc
2698         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2699                                  HalfGPRs.getValue(isLittleEndian ? 1 : 0),
2700                                  Flag);
2701         Flag = Chain.getValue(1);
2702         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2703         VA = RVLocs[++i]; // skip ahead to next loc
2704 
2705         // Extract the 2nd half and fall through to handle it as an f64 value.
2706         Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2707                           DAG.getConstant(1, dl, MVT::i32));
2708       }
2709       // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
2710       // available.
2711       SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
2712                                   DAG.getVTList(MVT::i32, MVT::i32), Arg);
2713       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2714                                fmrrd.getValue(isLittleEndian ? 0 : 1),
2715                                Flag);
2716       Flag = Chain.getValue(1);
2717       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2718       VA = RVLocs[++i]; // skip ahead to next loc
2719       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2720                                fmrrd.getValue(isLittleEndian ? 1 : 0),
2721                                Flag);
2722     } else
2723       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
2724 
2725     // Guarantee that all emitted copies are
2726     // stuck together, avoiding something bad.
2727     Flag = Chain.getValue(1);
2728     RetOps.push_back(DAG.getRegister(VA.getLocReg(),
2729                                      ReturnF16 ? MVT::f16 : VA.getLocVT()));
2730   }
2731   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2732   const MCPhysReg *I =
2733       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2734   if (I) {
2735     for (; *I; ++I) {
2736       if (ARM::GPRRegClass.contains(*I))
2737         RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2738       else if (ARM::DPRRegClass.contains(*I))
2739         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
2740       else
2741         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2742     }
2743   }
2744 
2745   // Update chain and glue.
2746   RetOps[0] = Chain;
2747   if (Flag.getNode())
2748     RetOps.push_back(Flag);
2749 
2750   // CPUs which aren't M-class use a special sequence to return from
2751   // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
2752   // though we use "subs pc, lr, #N").
2753   //
2754   // M-class CPUs actually use a normal return sequence with a special
2755   // (hardware-provided) value in LR, so the normal code path works.
2756   if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
2757       !Subtarget->isMClass()) {
2758     if (Subtarget->isThumb1Only())
2759       report_fatal_error("interrupt attribute is not supported in Thumb1");
2760     return LowerInterruptReturn(RetOps, dl, DAG);
2761   }
2762 
2763   return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
2764 }
2765 
bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  // Returns true if the only user of N (through the copies that lower the
  // return value into physical registers) is a return node, i.e. N's value
  // is only ever returned. On success, Chain is updated to the chain value
  // entering the copy sequence, so a tail call can be threaded in before it.
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
    SDNode *VMov = Copy;
    // f64 returned in a pair of GPRs.
    // First verify every user of the VMOVRRD is a CopyToReg (one per GPR).
    SmallPtrSet<SDNode*, 2> Copies;
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      if (UI->getOpcode() != ISD::CopyToReg)
        return false;
      Copies.insert(*UI);
    }
    if (Copies.size() > 2)
      return false;

    // The two copies are chained: the second one's chain operand is the first
    // copy. Identify which is which so we can take the chain entering the
    // first copy and leave Copy pointing at the last copy in the sequence.
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      SDValue UseChain = UI->getOperand(0);
      if (Copies.count(UseChain.getNode()))
        // Second CopyToReg
        Copy = *UI;
      else {
        // We are at the top of this chain.
        // If the copy has a glue operand, we conservatively assume it
        // isn't safe to perform a tail call.
        if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
          return false;
        // First CopyToReg
        TCChain = UseChain;
      }
    }
  } else if (Copy->getOpcode() == ISD::BITCAST) {
    // f32 returned in a single GPR.
    if (!Copy->hasOneUse())
      return false;
    Copy = *Copy->use_begin();
    if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
      return false;
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else {
    return false;
  }

  // Finally, every user of the last copy must be a return (normal or
  // interrupt return).
  bool HasRet = false;
  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() != ARMISD::RET_FLAG &&
        UI->getOpcode() != ARMISD::INTRET_FLAG)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  // Report the chain entering the copy sequence to the caller.
  Chain = TCChain;
  return true;
}
2840 
2841 bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2842   if (!Subtarget->supportsTailCall())
2843     return false;
2844 
2845   auto Attr =
2846       CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
2847   if (!CI->isTailCall() || Attr.getValueAsString() == "true")
2848     return false;
2849 
2850   return true;
2851 }
2852 
2853 // Trying to write a 64 bit value so need to split into two 32 bit values first,
2854 // and pass the lower and high parts through.
2855 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
2856   SDLoc DL(Op);
2857   SDValue WriteValue = Op->getOperand(2);
2858 
2859   // This function is only supposed to be called for i64 type argument.
2860   assert(WriteValue.getValueType() == MVT::i64
2861           && "LowerWRITE_REGISTER called for non-i64 type argument.");
2862 
2863   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
2864                            DAG.getConstant(0, DL, MVT::i32));
2865   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
2866                            DAG.getConstant(1, DL, MVT::i32));
2867   SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
2868   return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
2869 }
2870 
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing mode. These wrapped nodes will be selected
// into MOVi.
SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  // FIXME there is no actual debug info here
  SDLoc dl(Op);
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;

  // When generating execute-only code Constant Pools must be promoted to the
  // global data section. It's a bit ugly that we can't share them across basic
  // blocks, but this way we guarantee that execute-only behaves correct with
  // position-independent addressing modes.
  if (Subtarget->genExecuteOnly()) {
    auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
    auto T = const_cast<Type*>(CP->getType());
    auto C = const_cast<Constant*>(CP->getConstVal());
    auto M = const_cast<Module*>(DAG.getMachineFunction().
                                 getFunction().getParent());
    // Name the promoted global "<private-prefix>CP<function#>_<uid>" so
    // entries from different functions cannot collide.
    auto GV = new GlobalVariable(
                    *M, T, /*isConst=*/true, GlobalVariable::InternalLinkage, C,
                    Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
                    Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" +
                    Twine(AFI->createPICLabelUId())
                  );
    SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV),
                                            dl, PtrVT);
    // Lower the address of the freshly created global like any other global.
    return LowerGlobalAddress(GA, DAG);
  }

  // Target-specific (machine) constant pool values and plain IR constants go
  // through different getTargetConstantPool overloads.
  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  // Wrap so instruction selection does not see a raw TargetConstantPool node.
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
}
2914 
unsigned ARMTargetLowering::getJumpTableEncoding() const {
  // ARM emits jump tables inline, next to the branch that uses them.
  return MachineJumpTableInfo::EK_Inline;
}
2918 
2919 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
2920                                              SelectionDAG &DAG) const {
2921   MachineFunction &MF = DAG.getMachineFunction();
2922   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2923   unsigned ARMPCLabelIndex = 0;
2924   SDLoc DL(Op);
2925   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2926   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2927   SDValue CPAddr;
2928   bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
2929   if (!IsPositionIndependent) {
2930     CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
2931   } else {
2932     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2933     ARMPCLabelIndex = AFI->createPICLabelUId();
2934     ARMConstantPoolValue *CPV =
2935       ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
2936                                       ARMCP::CPBlockAddress, PCAdj);
2937     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2938   }
2939   CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
2940   SDValue Result = DAG.getLoad(
2941       PtrVT, DL, DAG.getEntryNode(), CPAddr,
2942       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2943   if (!IsPositionIndependent)
2944     return Result;
2945   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
2946   return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
2947 }
2948 
/// Convert a TLS address reference into the correct sequence of loads
/// and calls to compute the variable's address for Darwin, and return an
/// SDValue containing the final node.

/// Darwin only has one TLS scheme which must be capable of dealing with the
/// fully general situation, in the worst case. This means:
///     + "extern __thread" declaration.
///     + Defined in a possibly unknown dynamic library.
///
/// The general system is that each __thread variable has a [3 x i32] descriptor
/// which contains information used by the runtime to calculate the address. The
/// only part of this the compiler needs to know about is the first word, which
/// contains a function pointer that must be called with the address of the
/// entire descriptor in "r0".
///
/// Since this descriptor may be in a different unit, in general access must
/// proceed along the usual ARM rules. A common sequence to produce is:
///
///     movw rT1, :lower16:_var$non_lazy_ptr
///     movt rT1, :upper16:_var$non_lazy_ptr
///     ldr r0, [rT1]
///     ldr rT2, [r0]
///     blx rT2
///     [...address now in r0...]
SDValue
ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin() &&
         "This function expects a Darwin target");
  SDLoc DL(Op);

  // First step is to get the address of the actual global symbol. This is
  // where the TLS descriptor lives.
  SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);

  // The first entry in the descriptor is a function pointer that we must call
  // to obtain the address of the variable.
  SDValue Chain = DAG.getEntryNode();
  SDValue FuncTLVGet = DAG.getLoad(
      MVT::i32, DL, Chain, DescAddr,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()),
      /* Alignment = */ 4,
      MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant);
  Chain = FuncTLVGet.getValue(1);

  // The call below adjusts the stack, so record that fact on the frame.
  MachineFunction &F = DAG.getMachineFunction();
  MachineFrameInfo &MFI = F.getFrameInfo();
  MFI.setAdjustsStack(true);

  // TLS calls preserve all registers except those that absolutely must be
  // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
  // silly).
  auto TRI =
      getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo();
  auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
  const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());

  // Finally, we can make the call. This is just a degenerate version of a
  // normal ARM call node: r0 takes the address of the descriptor, and
  // returns the address of the variable in this thread.
  Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
  Chain =
      DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
                  Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
                  DAG.getRegisterMask(Mask), Chain.getValue(1));
  // The variable's address comes back in r0.
  return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
}
3017 
3018 SDValue
3019 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
3020                                                 SelectionDAG &DAG) const {
3021   assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
3022 
3023   SDValue Chain = DAG.getEntryNode();
3024   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3025   SDLoc DL(Op);
3026 
3027   // Load the current TEB (thread environment block)
3028   SDValue Ops[] = {Chain,
3029                    DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
3030                    DAG.getConstant(15, DL, MVT::i32),
3031                    DAG.getConstant(0, DL, MVT::i32),
3032                    DAG.getConstant(13, DL, MVT::i32),
3033                    DAG.getConstant(0, DL, MVT::i32),
3034                    DAG.getConstant(2, DL, MVT::i32)};
3035   SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
3036                                    DAG.getVTList(MVT::i32, MVT::Other), Ops);
3037 
3038   SDValue TEB = CurrentTEB.getValue(0);
3039   Chain = CurrentTEB.getValue(1);
3040 
3041   // Load the ThreadLocalStoragePointer from the TEB
3042   // A pointer to the TLS array is located at offset 0x2c from the TEB.
3043   SDValue TLSArray =
3044       DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
3045   TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
3046 
3047   // The pointer to the thread's TLS data area is at the TLS Index scaled by 4
3048   // offset into the TLSArray.
3049 
3050   // Load the TLS index from the C runtime
3051   SDValue TLSIndex =
3052       DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
3053   TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
3054   TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());
3055 
3056   SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
3057                               DAG.getConstant(2, DL, MVT::i32));
3058   SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
3059                             DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
3060                             MachinePointerInfo());
3061 
3062   // Get the offset of the start of the .tls section (section base)
3063   const auto *GA = cast<GlobalAddressSDNode>(Op);
3064   auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
3065   SDValue Offset = DAG.getLoad(
3066       PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
3067                                     DAG.getTargetConstantPool(CPV, PtrVT, 4)),
3068       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3069 
3070   return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
3071 }
3072 
3073 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
3074 SDValue
3075 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
3076                                                  SelectionDAG &DAG) const {
3077   SDLoc dl(GA);
3078   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3079   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
3080   MachineFunction &MF = DAG.getMachineFunction();
3081   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3082   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3083   ARMConstantPoolValue *CPV =
3084     ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
3085                                     ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
3086   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3087   Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
3088   Argument = DAG.getLoad(
3089       PtrVT, dl, DAG.getEntryNode(), Argument,
3090       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3091   SDValue Chain = Argument.getValue(1);
3092 
3093   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3094   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
3095 
3096   // call __tls_get_addr.
3097   ArgListTy Args;
3098   ArgListEntry Entry;
3099   Entry.Node = Argument;
3100   Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
3101   Args.push_back(Entry);
3102 
3103   // FIXME: is there useful debug info available here?
3104   TargetLowering::CallLoweringInfo CLI(DAG);
3105   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3106       CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
3107       DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));
3108 
3109   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3110   return CallResult.first;
3111 }
3112 
// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG,
                                        TLSModel::Model model) const {
  const GlobalValue *GV = GA->getGlobal();
  SDLoc dl(GA);
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // Get the Thread Pointer
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (model == TLSModel::InitialExec) {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    // Initial exec model: the offset lives in the GOT (GOTTPOFF entry), so
    // we load a pc-relative pointer to the GOT slot, then load the slot
    // itself to get the thread-pointer-relative offset.
    // The pipeline-induced pc offset is 8 in ARM mode and 4 in Thumb mode.
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                      ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
                                      true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    Chain = Offset.getValue(1);

    // Form the pc-relative GOT slot address, then load the actual offset.
    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  } else {
    // local exec model: the variable's TPOFF (offset from the thread
    // pointer) is known at link time, so a single constant-pool load
    // suffices.
    assert(model == TLSModel::LocalExec);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}
3166 
3167 SDValue
3168 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
3169   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3170   if (DAG.getTarget().useEmulatedTLS())
3171     return LowerToTLSEmulatedModel(GA, DAG);
3172 
3173   if (Subtarget->isTargetDarwin())
3174     return LowerGlobalTLSAddressDarwin(Op, DAG);
3175 
3176   if (Subtarget->isTargetWindows())
3177     return LowerGlobalTLSAddressWindows(Op, DAG);
3178 
3179   // TODO: implement the "local dynamic" model
3180   assert(Subtarget->isTargetELF() && "Only ELF implemented here");
3181   TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
3182 
3183   switch (model) {
3184     case TLSModel::GeneralDynamic:
3185     case TLSModel::LocalDynamic:
3186       return LowerToTLSGeneralDynamicModel(GA, DAG);
3187     case TLSModel::InitialExec:
3188     case TLSModel::LocalExec:
3189       return LowerToTLSExecModels(GA, DAG, model);
3190   }
3191   llvm_unreachable("bogus TLS model");
3192 }
3193 
3194 /// Return true if all users of V are within function F, looking through
3195 /// ConstantExprs.
3196 static bool allUsersAreInFunction(const Value *V, const Function *F) {
3197   SmallVector<const User*,4> Worklist;
3198   for (auto *U : V->users())
3199     Worklist.push_back(U);
3200   while (!Worklist.empty()) {
3201     auto *U = Worklist.pop_back_val();
3202     if (isa<ConstantExpr>(U)) {
3203       for (auto *UU : U->users())
3204         Worklist.push_back(UU);
3205       continue;
3206     }
3207 
3208     auto *I = dyn_cast<Instruction>(U);
3209     if (!I || I->getParent()->getParent() != F)
3210       return false;
3211   }
3212   return true;
3213 }
3214 
static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
                                     const GlobalValue *GV, SelectionDAG &DAG,
                                     EVT PtrVT, const SDLoc &dl) {
  // If we're creating a pool entry for a constant global with unnamed address,
  // and the global is small enough, we can emit it inline into the constant pool
  // to save ourselves an indirection.
  //
  // This is a win if the constant is only used in one function (so it doesn't
  // need to be duplicated) or duplicating the constant wouldn't increase code
  // size (implying the constant is no larger than 4 bytes).
  //
  // Returns the wrapped constant-pool address on success, or an empty SDValue
  // when promotion is not possible/profitable (callers then fall back to a
  // normal global-address lowering).
  const Function &F = DAG.getMachineFunction().getFunction();

  // We rely on this decision to inline being idempotent and unrelated to the
  // use-site. We know that if we inline a variable at one use site, we'll
  // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
  // doesn't know about this optimization, so bail out if it's enabled else
  // we could decide to inline here (and thus never emit the GV) but require
  // the GV from fast-isel generated code.
  if (!EnableConstpoolPromotion ||
      DAG.getMachineFunction().getTarget().Options.EnableFastISel)
      return SDValue();

  // Only local, constant globals with a known initializer and unnamed address
  // are candidates; anything else must keep its own storage and address.
  auto *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer() ||
      !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
      !GVar->hasLocalLinkage())
    return SDValue();

  // If we inline a value that contains relocations, we move the relocations
  // from .data to .text. This is not allowed in position-independent code.
  auto *Init = GVar->getInitializer();
  if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) &&
      Init->needsRelocation())
    return SDValue();

  // The constant islands pass can only really deal with alignment requests
  // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
  // any type wanting greater alignment requirements than 4 bytes. We also
  // can only promote constants that are multiples of 4 bytes in size or
  // are paddable to a multiple of 4. Currently we only try and pad constants
  // that are strings for simplicity.
  auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
  unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
  unsigned Align = DAG.getDataLayout().getPreferredAlignment(GVar);
  // RequiredPadding == 4 means Size is already a multiple of 4 (no padding).
  unsigned RequiredPadding = 4 - (Size % 4);
  bool PaddingPossible =
    RequiredPadding == 4 || (CDAInit && CDAInit->isString());
  if (!PaddingPossible || Align > 4 || Size > ConstpoolPromotionMaxSize ||
      Size == 0)
    return SDValue();

  unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // We can't bloat the constant pool too much, else the ConstantIslands pass
  // may fail to converge. If we haven't promoted this global yet (it may have
  // multiple uses), and promoting it would increase the constant pool size (Sz
  // > 4), ensure we have space to do so up to MaxTotal.
  if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
    if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
        ConstpoolPromotionMaxTotal)
      return SDValue();

  // This is only valid if all users are in a single function; we can't clone
  // the constant in general. The LLVM IR unnamed_addr allows merging
  // constants, but not cloning them.
  //
  // We could potentially allow cloning if we could prove all uses of the
  // constant in the current function don't care about the address, like
  // printf format strings. But that isn't implemented for now.
  if (!allUsersAreInFunction(GVar, &F))
    return SDValue();

  // We're going to inline this global. Pad it out if needed.
  if (RequiredPadding != 4) {
    StringRef S = CDAInit->getAsString();

    // Copy the string bytes and append zero padding up to a 4-byte multiple.
    SmallVector<uint8_t,16> V(S.size());
    std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
    while (RequiredPadding--)
      V.push_back(0);
    Init = ConstantDataArray::get(*DAG.getContext(), V);
  }

  auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
  SDValue CPAddr =
    DAG.getTargetConstantPool(CPVal, PtrVT, /*Align=*/4);
  // Record the promotion (once per global) and account for the constant pool
  // growth so the budget check above stays accurate.
  if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
    AFI->markGlobalAsPromotedToConstantPool(GVar);
    AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
                                      PaddedSize - 4);
  }
  ++NumConstpoolPromoted;
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
}
3311 
3312 bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const {
3313   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3314     if (!(GV = GA->getBaseObject()))
3315       return false;
3316   if (const auto *V = dyn_cast<GlobalVariable>(GV))
3317     return V->isConstant();
3318   return isa<Function>(GV);
3319 }
3320 
3321 SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
3322                                               SelectionDAG &DAG) const {
3323   switch (Subtarget->getTargetTriple().getObjectFormat()) {
3324   default: llvm_unreachable("unknown object format");
3325   case Triple::COFF:
3326     return LowerGlobalAddressWindows(Op, DAG);
3327   case Triple::ELF:
3328     return LowerGlobalAddressELF(Op, DAG);
3329   case Triple::MachO:
3330     return LowerGlobalAddressDarwin(Op, DAG);
3331   }
3332 }
3333 
SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) const {
  // Lower a global address for ELF targets, picking between constant-pool
  // promotion, GOT-indirect PIC, pc-relative ROPI, SB-relative RWPI, a
  // movw/movt pair, or a plain constant-pool load.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  const TargetMachine &TM = getTargetMachine();
  bool IsRO = isReadOnly(GV);

  // promoteToConstantPool only if not generating XO text section
  if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
    if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl))
      return V;

  if (isPositionIndependent()) {
    // Non-DSO-local globals are reached through the GOT; DSO-local ones are
    // addressed pc-relatively.
    bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
    SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                           UseGOT_PREL ? ARMII::MO_GOT : 0);
    SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
    if (UseGOT_PREL)
      // Load the actual address out of the GOT slot.
      Result =
          DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                      MachinePointerInfo::getGOT(DAG.getMachineFunction()));
    return Result;
  } else if (Subtarget->isROPI() && IsRO) {
    // PC-relative.
    SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
    SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
    return Result;
  } else if (Subtarget->isRWPI() && !IsRO) {
    // SB-relative: address is the static base (R9) plus an offset that is
    // either materialized with movw/movt or loaded from a literal pool.
    SDValue RelAddr;
    if (Subtarget->useMovt()) {
      ++NumMovwMovt;
      SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL);
      RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G);
    } else { // use literal pool for address constant
      ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      RelAddr = DAG.getLoad(
          PtrVT, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }
    SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
    SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr);
    return Result;
  }

  // If we have T2 ops, we can materialize the address directly via movt/movw
  // pair. This is always cheaper.
  if (Subtarget->useMovt()) {
    ++NumMovwMovt;
    // FIXME: Once remat is capable of dealing with instructions with register
    // operands, expand this into two nodes.
    return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
                       DAG.getTargetGlobalAddress(GV, dl, PtrVT));
  } else {
    // No movw/movt: load the address from the constant pool.
    SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    return DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  }
}
3399 
3400 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
3401                                                     SelectionDAG &DAG) const {
3402   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3403          "ROPI/RWPI not currently supported for Darwin");
3404   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3405   SDLoc dl(Op);
3406   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3407 
3408   if (Subtarget->useMovt())
3409     ++NumMovwMovt;
3410 
3411   // FIXME: Once remat is capable of dealing with instructions with register
3412   // operands, expand this into multiple nodes
3413   unsigned Wrapper =
3414       isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;
3415 
3416   SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
3417   SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
3418 
3419   if (Subtarget->isGVIndirectSymbol(GV))
3420     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3421                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3422   return Result;
3423 }
3424 
3425 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
3426                                                      SelectionDAG &DAG) const {
3427   assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
3428   assert(Subtarget->useMovt() &&
3429          "Windows on ARM expects to use movw/movt");
3430   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3431          "ROPI/RWPI not currently supported for Windows");
3432 
3433   const TargetMachine &TM = getTargetMachine();
3434   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3435   ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG;
3436   if (GV->hasDLLImportStorageClass())
3437     TargetFlags = ARMII::MO_DLLIMPORT;
3438   else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
3439     TargetFlags = ARMII::MO_COFFSTUB;
3440   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3441   SDValue Result;
3442   SDLoc DL(Op);
3443 
3444   ++NumMovwMovt;
3445 
3446   // FIXME: Once remat is capable of dealing with instructions with register
3447   // operands, expand this into two nodes.
3448   Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
3449                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
3450                                                   TargetFlags));
3451   if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
3452     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
3453                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3454   return Result;
3455 }
3456 
3457 SDValue
3458 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
3459   SDLoc dl(Op);
3460   SDValue Val = DAG.getConstant(0, dl, MVT::i32);
3461   return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
3462                      DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
3463                      Op.getOperand(1), Val);
3464 }
3465 
3466 SDValue
3467 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
3468   SDLoc dl(Op);
3469   return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
3470                      Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
3471 }
3472 
3473 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
3474                                                       SelectionDAG &DAG) const {
3475   SDLoc dl(Op);
3476   return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
3477                      Op.getOperand(0));
3478 }
3479 
/// Custom lowering for ISD::INTRINSIC_WO_CHAIN nodes. Intrinsics that are
/// not handled here return SDValue() and receive the default treatment.
SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                          const ARMSubtarget *Subtarget) const {
  // Operand 0 is the intrinsic ID; the intrinsic's arguments start at
  // operand 1.
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::eh_sjlj_lsda: {
    // Materialize the address of this function's LSDA via a constant-pool
    // entry. For PIC the entry carries a pc adjustment (4 in Thumb, 8 in
    // ARM) and the loaded value is combined with a PIC label via PIC_ADD.
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue CPAddr;
    bool IsPositionIndependent = isPositionIndependent();
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex,
                                      ARMCP::CPLSDA, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result = DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    if (IsPositionIndependent) {
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    }
    return Result;
  }
  // The NEON intrinsics below are rewritten into generic or ARM-specific
  // ISD opcodes so later DAG combines can reason about them.
  case Intrinsic::arm_neon_vabs:
    return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
                        Op.getOperand(1));
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
      ? ARMISD::VMULLs : ARMISD::VMULLu;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminnm:
  case Intrinsic::arm_neon_vmaxnm: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
      ? ISD::FMINNUM : ISD::FMAXNUM;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminu:
  case Intrinsic::arm_neon_vmaxu: {
    // Unsigned min/max only makes sense for integer vectors; leave any
    // floating-point use to the default lowering.
    if (Op.getValueType().isFloatingPoint())
      return SDValue();
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
      ? ISD::UMIN : ISD::UMAX;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vmins:
  case Intrinsic::arm_neon_vmaxs: {
    // v{min,max}s is overloaded between signed integers and floats.
    if (!Op.getValueType().isFloatingPoint()) {
      unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
        ? ISD::SMIN : ISD::SMAX;
      return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2));
    }
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
      ? ISD::FMINIMUM : ISD::FMAXIMUM;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vtbl1:
    return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::arm_neon_vtbl2:
    return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  }
}
3562 
3563 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
3564                                  const ARMSubtarget *Subtarget) {
3565   SDLoc dl(Op);
3566   ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
3567   auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue());
3568   if (SSID == SyncScope::SingleThread)
3569     return Op;
3570 
3571   if (!Subtarget->hasDataBarrier()) {
3572     // Some ARMv6 cpus can support data barriers with an mcr instruction.
3573     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
3574     // here.
3575     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
3576            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
3577     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
3578                        DAG.getConstant(0, dl, MVT::i32));
3579   }
3580 
3581   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
3582   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
3583   ARM_MB::MemBOpt Domain = ARM_MB::ISH;
3584   if (Subtarget->isMClass()) {
3585     // Only a full system barrier exists in the M-class architectures.
3586     Domain = ARM_MB::SY;
3587   } else if (Subtarget->preferISHSTBarriers() &&
3588              Ord == AtomicOrdering::Release) {
3589     // Swift happens to implement ISHST barriers in a way that's compatible with
3590     // Release semantics but weaker than ISH so we'd be fools not to use
3591     // it. Beware: other processors probably don't!
3592     Domain = ARM_MB::ISHST;
3593   }
3594 
3595   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
3596                      DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
3597                      DAG.getConstant(Domain, dl, MVT::i32));
3598 }
3599 
3600 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
3601                              const ARMSubtarget *Subtarget) {
3602   // ARM pre v5TE and Thumb1 does not have preload instructions.
3603   if (!(Subtarget->isThumb2() ||
3604         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
3605     // Just preserve the chain.
3606     return Op.getOperand(0);
3607 
3608   SDLoc dl(Op);
3609   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
3610   if (!isRead &&
3611       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
3612     // ARMv7 with MP extension has PLDW.
3613     return Op.getOperand(0);
3614 
3615   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3616   if (Subtarget->isThumb()) {
3617     // Invert the bits.
3618     isRead = ~isRead & 1;
3619     isData = ~isData & 1;
3620   }
3621 
3622   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
3623                      Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
3624                      DAG.getConstant(isData, dl, MVT::i32));
3625 }
3626 
3627 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
3628   MachineFunction &MF = DAG.getMachineFunction();
3629   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
3630 
3631   // vastart just stores the address of the VarArgsFrameIndex slot into the
3632   // memory location argument.
3633   SDLoc dl(Op);
3634   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
3635   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3636   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3637   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3638                       MachinePointerInfo(SV));
3639 }
3640 
3641 SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
3642                                                 CCValAssign &NextVA,
3643                                                 SDValue &Root,
3644                                                 SelectionDAG &DAG,
3645                                                 const SDLoc &dl) const {
3646   MachineFunction &MF = DAG.getMachineFunction();
3647   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3648 
3649   const TargetRegisterClass *RC;
3650   if (AFI->isThumb1OnlyFunction())
3651     RC = &ARM::tGPRRegClass;
3652   else
3653     RC = &ARM::GPRRegClass;
3654 
3655   // Transform the arguments stored in physical registers into virtual ones.
3656   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3657   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3658 
3659   SDValue ArgValue2;
3660   if (NextVA.isMemLoc()) {
3661     MachineFrameInfo &MFI = MF.getFrameInfo();
3662     int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);
3663 
3664     // Create load node to retrieve arguments from the stack.
3665     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3666     ArgValue2 = DAG.getLoad(
3667         MVT::i32, dl, Root, FIN,
3668         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3669   } else {
3670     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
3671     ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3672   }
3673   if (!Subtarget->isLittle())
3674     std::swap (ArgValue, ArgValue2);
3675   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
3676 }
3677 
// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval).  Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
// Return: The frame index registers were stored into.
int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                                      const SDLoc &dl, SDValue &Chain,
                                      const Value *OrigArg,
                                      unsigned InRegsParamRecordIdx,
                                      int ArgOffset, unsigned ArgSize) const {
  // Currently, two use-cases possible:
  // Case #1. Non-var-args function, and we meet first byval parameter.
  //          Setup first unallocated register as first byval register;
  //          eat all remained registers
  //          (these two actions are performed by HandleByVal method).
  //          Then, here, we initialize stack frame with
  //          "store-reg" instructions.
  // Case #2. Var-args function, that doesn't contain byval parameters.
  //          The same: eat all remained unallocated registers,
  //          initialize stack frame.

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // Determine the half-open register range [RBegin, REnd) to store.
  unsigned RBegin, REnd;
  if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
    // Byval case: the register range was recorded earlier (by HandleByVal).
    CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
  } else {
    // Varargs case: store every GPR from the first unallocated one up to
    // (but excluding) r4. If all were allocated, RBegin == REnd == r4 and
    // the loop below does nothing.
    unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
    REnd = ARM::R4;
  }

  // When registers are being saved, place the object at a negative offset,
  // 4 bytes per register below the incoming stack pointer.
  // NOTE(review): this arithmetic assumes ARM::R0..ARM::R4 have consecutive
  // enum values — that is what `ARM::R4 - RBegin` relies on.
  if (REnd != RBegin)
    ArgOffset = -4 * (ARM::R4 - RBegin);

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
  SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);

  SmallVector<SDValue, 4> MemOps;
  const TargetRegisterClass *RC =
      AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;

  // Copy each live-in register into a virtual register and store it to its
  // slot, advancing the pointer 4 bytes per register.
  for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
    unsigned VReg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
    SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(OrigArg, 4 * i));
    MemOps.push_back(Store);
    FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
  }

  // Merge all the stores back into the chain with a TokenFactor.
  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  return FrameIndex;
}
3738 
3739 // Setup stack frame, the va_list pointer will start from.
3740 void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
3741                                              const SDLoc &dl, SDValue &Chain,
3742                                              unsigned ArgOffset,
3743                                              unsigned TotalArgRegsSaveSize,
3744                                              bool ForceMutable) const {
3745   MachineFunction &MF = DAG.getMachineFunction();
3746   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3747 
3748   // Try to store any remaining integer argument regs
3749   // to their spots on the stack so that they may be loaded by dereferencing
3750   // the result of va_next.
3751   // If there is no regs to be stored, just point address after last
3752   // argument passed via stack.
3753   int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
3754                                   CCInfo.getInRegsParamsCount(),
3755                                   CCInfo.getNextStackOffset(),
3756                                   std::max(4U, TotalArgRegsSaveSize));
3757   AFI->setVarArgsFrameIndex(FrameIndex);
3758 }
3759 
3760 SDValue ARMTargetLowering::LowerFormalArguments(
3761     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3762     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3763     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3764   MachineFunction &MF = DAG.getMachineFunction();
3765   MachineFrameInfo &MFI = MF.getFrameInfo();
3766 
3767   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3768 
3769   // Assign locations to all of the incoming arguments.
3770   SmallVector<CCValAssign, 16> ArgLocs;
3771   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3772                  *DAG.getContext());
3773   CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
3774 
3775   SmallVector<SDValue, 16> ArgValues;
3776   SDValue ArgValue;
3777   Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
3778   unsigned CurArgIdx = 0;
3779 
3780   // Initially ArgRegsSaveSize is zero.
3781   // Then we increase this value each time we meet byval parameter.
3782   // We also increase this value in case of varargs function.
3783   AFI->setArgRegsSaveSize(0);
3784 
3785   // Calculate the amount of stack space that we need to allocate to store
3786   // byval and variadic arguments that are passed in registers.
3787   // We need to know this before we allocate the first byval or variadic
3788   // argument, as they will be allocated a stack slot below the CFA (Canonical
3789   // Frame Address, the stack pointer at entry to the function).
3790   unsigned ArgRegBegin = ARM::R4;
3791   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3792     if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
3793       break;
3794 
3795     CCValAssign &VA = ArgLocs[i];
3796     unsigned Index = VA.getValNo();
3797     ISD::ArgFlagsTy Flags = Ins[Index].Flags;
3798     if (!Flags.isByVal())
3799       continue;
3800 
3801     assert(VA.isMemLoc() && "unexpected byval pointer in reg");
3802     unsigned RBegin, REnd;
3803     CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
3804     ArgRegBegin = std::min(ArgRegBegin, RBegin);
3805 
3806     CCInfo.nextInRegsParam();
3807   }
3808   CCInfo.rewindByValRegsInfo();
3809 
3810   int lastInsIndex = -1;
3811   if (isVarArg && MFI.hasVAStart()) {
3812     unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
3813     if (RegIdx != array_lengthof(GPRArgRegs))
3814       ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
3815   }
3816 
3817   unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
3818   AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
3819   auto PtrVT = getPointerTy(DAG.getDataLayout());
3820 
3821   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3822     CCValAssign &VA = ArgLocs[i];
3823     if (Ins[VA.getValNo()].isOrigArg()) {
3824       std::advance(CurOrigArg,
3825                    Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
3826       CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
3827     }
3828     // Arguments stored in registers.
3829     if (VA.isRegLoc()) {
3830       EVT RegVT = VA.getLocVT();
3831 
3832       if (VA.needsCustom()) {
3833         // f64 and vector types are split up into multiple registers or
3834         // combinations of registers and stack slots.
3835         if (VA.getLocVT() == MVT::v2f64) {
3836           SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
3837                                                    Chain, DAG, dl);
3838           VA = ArgLocs[++i]; // skip ahead to next loc
3839           SDValue ArgValue2;
3840           if (VA.isMemLoc()) {
3841             int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
3842             SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3843             ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
3844                                     MachinePointerInfo::getFixedStack(
3845                                         DAG.getMachineFunction(), FI));
3846           } else {
3847             ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
3848                                              Chain, DAG, dl);
3849           }
3850           ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
3851           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3852                                  ArgValue, ArgValue1,
3853                                  DAG.getIntPtrConstant(0, dl));
3854           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3855                                  ArgValue, ArgValue2,
3856                                  DAG.getIntPtrConstant(1, dl));
3857         } else
3858           ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
3859       } else {
3860         const TargetRegisterClass *RC;
3861 
3862 
3863         if (RegVT == MVT::f16)
3864           RC = &ARM::HPRRegClass;
3865         else if (RegVT == MVT::f32)
3866           RC = &ARM::SPRRegClass;
3867         else if (RegVT == MVT::f64 || RegVT == MVT::v4f16)
3868           RC = &ARM::DPRRegClass;
3869         else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16)
3870           RC = &ARM::QPRRegClass;
3871         else if (RegVT == MVT::i32)
3872           RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
3873                                            : &ARM::GPRRegClass;
3874         else
3875           llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
3876 
3877         // Transform the arguments in physical registers into virtual ones.
3878         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3879         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3880       }
3881 
3882       // If this is an 8 or 16-bit value, it is really passed promoted
3883       // to 32 bits.  Insert an assert[sz]ext to capture this, then
3884       // truncate to the right size.
3885       switch (VA.getLocInfo()) {
3886       default: llvm_unreachable("Unknown loc info!");
3887       case CCValAssign::Full: break;
3888       case CCValAssign::BCvt:
3889         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
3890         break;
3891       case CCValAssign::SExt:
3892         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3893                                DAG.getValueType(VA.getValVT()));
3894         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3895         break;
3896       case CCValAssign::ZExt:
3897         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3898                                DAG.getValueType(VA.getValVT()));
3899         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3900         break;
3901       }
3902 
3903       InVals.push_back(ArgValue);
3904     } else { // VA.isRegLoc()
3905       // sanity check
3906       assert(VA.isMemLoc());
3907       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
3908 
3909       int index = VA.getValNo();
3910 
3911       // Some Ins[] entries become multiple ArgLoc[] entries.
3912       // Process them only once.
3913       if (index != lastInsIndex)
3914         {
3915           ISD::ArgFlagsTy Flags = Ins[index].Flags;
3916           // FIXME: For now, all byval parameter objects are marked mutable.
3917           // This can be changed with more analysis.
3918           // In case of tail call optimization mark all arguments mutable.
3919           // Since they could be overwritten by lowering of arguments in case of
3920           // a tail call.
3921           if (Flags.isByVal()) {
3922             assert(Ins[index].isOrigArg() &&
3923                    "Byval arguments cannot be implicit");
3924             unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
3925 
3926             int FrameIndex = StoreByValRegs(
3927                 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
3928                 VA.getLocMemOffset(), Flags.getByValSize());
3929             InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
3930             CCInfo.nextInRegsParam();
3931           } else {
3932             unsigned FIOffset = VA.getLocMemOffset();
3933             int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
3934                                            FIOffset, true);
3935 
3936             // Create load nodes to retrieve arguments from the stack.
3937             SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3938             InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
3939                                          MachinePointerInfo::getFixedStack(
3940                                              DAG.getMachineFunction(), FI)));
3941           }
3942           lastInsIndex = index;
3943         }
3944     }
3945   }
3946 
3947   // varargs
3948   if (isVarArg && MFI.hasVAStart())
3949     VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
3950                          CCInfo.getNextStackOffset(),
3951                          TotalArgRegsSaveSize);
3952 
3953   AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
3954 
3955   return Chain;
3956 }
3957 
3958 /// isFloatingPointZero - Return true if this is +0.0.
3959 static bool isFloatingPointZero(SDValue Op) {
3960   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
3961     return CFP->getValueAPF().isPosZero();
3962   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
3963     // Maybe this has already been legalized into the constant pool?
3964     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
3965       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
3966       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
3967         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
3968           return CFP->getValueAPF().isPosZero();
3969     }
3970   } else if (Op->getOpcode() == ISD::BITCAST &&
3971              Op->getValueType(0) == MVT::f64) {
3972     // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
3973     // created by LowerConstantFP().
3974     SDValue BitcastOp = Op->getOperand(0);
3975     if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
3976         isNullConstant(BitcastOp->getOperand(0)))
3977       return true;
3978   }
3979   return false;
3980 }
3981 
/// Returns appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &ARMcc, SelectionDAG &DAG,
                                     const SDLoc &dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    unsigned C = RHSC->getZExtValue();
    if (!isLegalICmpImmediate((int32_t)C)) {
      // Constant does not fit, try adjusting it by one.
      // E.g. "x < C" is equivalent to "x <= C-1" (and "x >= C" to "x > C-1")
      // provided C-1 does not wrap; the adjusted constant may be encodable
      // even when the original is not. Each case guards against the value
      // that would wrap.
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        // Signed: C == INT_MIN would wrap on C-1.
        if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C - 1, dl, MVT::i32);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        // Unsigned: C == 0 would wrap on C-1.
        if (C != 0 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C - 1, dl, MVT::i32);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        // Signed: C == INT_MAX would wrap on C+1.
        if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C + 1, dl, MVT::i32);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        // Unsigned: C == UINT_MAX would wrap on C+1.
        if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C + 1, dl, MVT::i32);
        }
        break;
      }
    }
  } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) &&
             (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) {
    // In ARM and Thumb-2, the compare instructions can shift their second
    // operand.
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
  }

  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);

  // If the RHS is a constant zero then the V (overflow) flag will never be
  // set. This can allow us to simplify GE to PL or LT to MI, which can be
  // simpler for other passes (like the peephole optimiser) to deal with.
  if (isNullConstant(RHS)) {
    switch (CondCode) {
      default: break;
      case ARMCC::GE:
        CondCode = ARMCC::PL;
        break;
      case ARMCC::LT:
        CondCode = ARMCC::MI;
        break;
    }
  }

  // Comparisons that only use the Z flag get the cheaper CMPZ node.
  ARMISD::NodeType CompareType;
  switch (CondCode) {
  default:
    CompareType = ARMISD::CMP;
    break;
  case ARMCC::EQ:
  case ARMCC::NE:
    // Uses only Z Flag
    CompareType = ARMISD::CMPZ;
    break;
  }
  ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
}
4062 
4063 /// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
4064 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
4065                                      SelectionDAG &DAG, const SDLoc &dl,
4066                                      bool InvalidOnQNaN) const {
4067   assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64);
4068   SDValue Cmp;
4069   SDValue C = DAG.getConstant(InvalidOnQNaN, dl, MVT::i32);
4070   if (!isFloatingPointZero(RHS))
4071     Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS, C);
4072   else
4073     Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS, C);
4074   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
4075 }
4076 
4077 /// duplicateCmp - Glue values can have only one use, so this function
4078 /// duplicates a comparison node.
4079 SDValue
4080 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
4081   unsigned Opc = Cmp.getOpcode();
4082   SDLoc DL(Cmp);
4083   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
4084     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4085 
4086   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
4087   Cmp = Cmp.getOperand(0);
4088   Opc = Cmp.getOpcode();
4089   if (Opc == ARMISD::CMPFP)
4090     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),
4091                       Cmp.getOperand(1), Cmp.getOperand(2));
4092   else {
4093     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
4094     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),
4095                       Cmp.getOperand(1));
4096   }
4097   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
4098 }
4099 
// This function returns three things: the arithmetic computation itself
// (Value), a comparison (OverflowCmp), and a condition code (ARMcc).  The
// comparison and the condition code define the case in which the arithmetic
// computation *does not* overflow.
std::pair<SDValue, SDValue>
ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                 SDValue &ARMcc) const {
  assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");

  SDValue Value, OverflowCmp;
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDLoc dl(Op);

  // FIXME: We are currently always generating CMPs because we don't support
  // generating CMN through the backend. This is not as good as the natural
  // CMP case because it causes a register dependency and cannot be folded
  // later.

  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Unknown overflow instruction!");
  case ISD::SADDO:
    // VC (overflow clear) after comparing the sum with one addend selects
    // the no-overflow case.
    ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
    Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
    break;
  case ISD::UADDO:
    // An unsigned add overflowed iff the result is (unsigned) below an
    // addend; HS (result >= LHS) therefore selects the no-overflow case.
    ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
    // We use ADDC here to correspond to its use in LowerUnsignedALUO.
    // We do not use it in the USUBO case as Value may not be used.
    Value = DAG.getNode(ARMISD::ADDC, dl,
                        DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS)
                .getValue(0);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
    break;
  case ISD::SSUBO:
    // CMP LHS, RHS recomputes the subtraction's flags; VC = no signed
    // overflow.
    ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
    Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
    break;
  case ISD::USUBO:
    // HS after CMP LHS, RHS means no borrow, i.e. no unsigned underflow.
    ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
    Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
    break;
  case ISD::UMULO:
    // We generate a UMUL_LOHI and then check if the high word is 0.
    ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
    Value = DAG.getNode(ISD::UMUL_LOHI, dl,
                        DAG.getVTList(Op.getValueType(), Op.getValueType()),
                        LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
                              DAG.getConstant(0, dl, MVT::i32));
    Value = Value.getValue(0); // We only want the low 32 bits for the result.
    break;
  case ISD::SMULO:
    // We generate a SMUL_LOHI and then check if all the bits of the high word
    // are the same as the sign bit of the low word.
    ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
    Value = DAG.getNode(ISD::SMUL_LOHI, dl,
                        DAG.getVTList(Op.getValueType(), Op.getValueType()),
                        LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
                              DAG.getNode(ISD::SRA, dl, Op.getValueType(),
                                          Value.getValue(0),
                                          DAG.getConstant(31, dl, MVT::i32)));
    Value = Value.getValue(0); // We only want the low 32 bits for the result.
    break;
  } // switch (...)

  return std::make_pair(Value, OverflowCmp);
}
4173 
4174 SDValue
4175 ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const {
4176   // Let legalize expand this if it isn't a legal type yet.
4177   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
4178     return SDValue();
4179 
4180   SDValue Value, OverflowCmp;
4181   SDValue ARMcc;
4182   std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
4183   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4184   SDLoc dl(Op);
4185   // We use 0 and 1 as false and true values.
4186   SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
4187   SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
4188   EVT VT = Op.getValueType();
4189 
4190   SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
4191                                  ARMcc, CCR, OverflowCmp);
4192 
4193   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
4194   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
4195 }
4196 
4197 static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry,
4198                                               SelectionDAG &DAG) {
4199   SDLoc DL(BoolCarry);
4200   EVT CarryVT = BoolCarry.getValueType();
4201 
4202   // This converts the boolean value carry into the carry flag by doing
4203   // ARMISD::SUBC Carry, 1
4204   SDValue Carry = DAG.getNode(ARMISD::SUBC, DL,
4205                               DAG.getVTList(CarryVT, MVT::i32),
4206                               BoolCarry, DAG.getConstant(1, DL, CarryVT));
4207   return Carry.getValue(1);
4208 }
4209 
4210 static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT,
4211                                               SelectionDAG &DAG) {
4212   SDLoc DL(Flags);
4213 
4214   // Now convert the carry flag into a boolean carry. We do this
4215   // using ARMISD:ADDE 0, 0, Carry
4216   return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32),
4217                      DAG.getConstant(0, DL, MVT::i32),
4218                      DAG.getConstant(0, DL, MVT::i32), Flags);
4219 }
4220 
4221 SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op,
4222                                              SelectionDAG &DAG) const {
4223   // Let legalize expand this if it isn't a legal type yet.
4224   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
4225     return SDValue();
4226 
4227   SDValue LHS = Op.getOperand(0);
4228   SDValue RHS = Op.getOperand(1);
4229   SDLoc dl(Op);
4230 
4231   EVT VT = Op.getValueType();
4232   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
4233   SDValue Value;
4234   SDValue Overflow;
4235   switch (Op.getOpcode()) {
4236   default:
4237     llvm_unreachable("Unknown overflow instruction!");
4238   case ISD::UADDO:
4239     Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS);
4240     // Convert the carry flag into a boolean value.
4241     Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
4242     break;
4243   case ISD::USUBO: {
4244     Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS);
4245     // Convert the carry flag into a boolean value.
4246     Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
4247     // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow
4248     // value. So compute 1 - C.
4249     Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32,
4250                            DAG.getConstant(1, dl, MVT::i32), Overflow);
4251     break;
4252   }
4253   }
4254 
4255   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
4256 }
4257 
// Lower ISD::SELECT. Recognizes selects driven by an overflow bit or by a
// boolean-producing CMOV and folds them into a single conditional move;
// otherwise falls back to a SELECT_CC against zero.
SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond = Op.getOperand(0);
  SDValue SelectTrue = Op.getOperand(1);
  SDValue SelectFalse = Op.getOperand(2);
  SDLoc dl(Op);
  unsigned Opc = Cond.getOpcode();

  // If the condition is the overflow bit (result #1) of an overflow node,
  // select directly on the overflow compare instead of materializing a
  // boolean first.
  if (Cond.getResNo() == 1 &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO)) {
    if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
      return SDValue();

    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    EVT VT = Op.getValueType();

    // ARMcc encodes the *no-overflow* case and getCMOV's parameters are
    // (FalseVal, TrueVal), so passing (SelectTrue, SelectFalse) here yields
    // SelectTrue exactly when the arithmetic overflowed.
    return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
                   OverflowCmp, DAG);
  }

  // Convert:
  //
  //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
  //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
  //
  if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
    const ConstantSDNode *CMOVTrue =
      dyn_cast<ConstantSDNode>(Cond.getOperand(0));
    const ConstantSDNode *CMOVFalse =
      dyn_cast<ConstantSDNode>(Cond.getOperand(1));

    if (CMOVTrue && CMOVFalse) {
      unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
      unsigned CMOVFalseVal = CMOVFalse->getZExtValue();

      SDValue True;
      SDValue False;
      if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
        True = SelectTrue;
        False = SelectFalse;
      } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
        // Inverted boolean: swap the select arms instead of the condition.
        True = SelectFalse;
        False = SelectTrue;
      }

      if (True.getNode() && False.getNode()) {
        EVT VT = Op.getValueType();
        SDValue ARMcc = Cond.getOperand(2);
        SDValue CCR = Cond.getOperand(3);
        // Glue values are single-use, so the inner compare must be cloned.
        SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
        assert(True.getValueType() == VT);
        return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
      }
    }
  }

  // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
  // undefined bits before doing a full-word comparison with zero.
  Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
                     DAG.getConstant(1, dl, Cond.getValueType()));

  return DAG.getSelectCC(dl, Cond,
                         DAG.getConstant(0, dl, Cond.getValueType()),
                         SelectTrue, SelectFalse, ISD::SETNE);
}
4326 
// Determine how to implement the ISD condition CC with VSEL, which can only
// encode the GE, GT, VS and EQ condition codes. Outputs the chosen ARM
// condition code and whether the compare operands (swpCmpOps) and/or the VSEL
// operands (swpVselOps) must be swapped to realize CC. Note the checks below
// compose sequentially: later rules adjust the results of earlier ones.
static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                                 bool &swpCmpOps, bool &swpVselOps) {
  // Start by selecting the GE condition code for opcodes that return true for
  // 'equality'
  if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
      CC == ISD::SETULE || CC == ISD::SETGE  || CC == ISD::SETLE)
    CondCode = ARMCC::GE;

  // and GT for opcodes that return false for 'equality'.
  else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
           CC == ISD::SETULT || CC == ISD::SETGT  || CC == ISD::SETLT)
    CondCode = ARMCC::GT;

  // Since we are constrained to GE/GT, if the opcode contains 'less', we need
  // to swap the compare operands.
  if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
      CC == ISD::SETULT || CC == ISD::SETLE  || CC == ISD::SETLT)
    swpCmpOps = true;

  // Both GT and GE are ordered comparisons, and return false for 'unordered'.
  // If we have an unordered opcode, we need to swap the operands to the VSEL
  // instruction (effectively negating the condition).
  //
  // This also has the effect of swapping which one of 'less' or 'greater'
  // returns true, so we also swap the compare operands. It also switches
  // whether we return true for 'equality', so we compensate by picking the
  // opposite condition code to our original choice.
  if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
      CC == ISD::SETUGT) {
    swpCmpOps = !swpCmpOps;
    swpVselOps = !swpVselOps;
    CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
  }

  // 'ordered' is 'anything but unordered', so use the VS condition code and
  // swap the VSEL operands.
  if (CC == ISD::SETO) {
    CondCode = ARMCC::VS;
    swpVselOps = true;
  }

  // 'unordered or not equal' is 'anything but equal', so use the EQ condition
  // code and swap the VSEL operands. Also do this if we don't care about the
  // unordered case.
  if (CC == ISD::SETUNE || CC == ISD::SETNE) {
    CondCode = ARMCC::EQ;
    swpVselOps = true;
  }
}
4376 
4377 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
4378                                    SDValue TrueVal, SDValue ARMcc, SDValue CCR,
4379                                    SDValue Cmp, SelectionDAG &DAG) const {
4380   if (!Subtarget->hasFP64() && VT == MVT::f64) {
4381     FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4382                            DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
4383     TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4384                           DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
4385 
4386     SDValue TrueLow = TrueVal.getValue(0);
4387     SDValue TrueHigh = TrueVal.getValue(1);
4388     SDValue FalseLow = FalseVal.getValue(0);
4389     SDValue FalseHigh = FalseVal.getValue(1);
4390 
4391     SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
4392                               ARMcc, CCR, Cmp);
4393     SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
4394                                ARMcc, CCR, duplicateCmp(Cmp, DAG));
4395 
4396     return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
4397   } else {
4398     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
4399                        Cmp);
4400   }
4401 }
4402 
4403 static bool isGTorGE(ISD::CondCode CC) {
4404   return CC == ISD::SETGT || CC == ISD::SETGE;
4405 }
4406 
4407 static bool isLTorLE(ISD::CondCode CC) {
4408   return CC == ISD::SETLT || CC == ISD::SETLE;
4409 }
4410 
4411 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
4412 // All of these conditions (and their <= and >= counterparts) will do:
4413 //          x < k ? k : x
4414 //          x > k ? x : k
4415 //          k < x ? x : k
4416 //          k > x ? k : x
4417 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
4418                             const SDValue TrueVal, const SDValue FalseVal,
4419                             const ISD::CondCode CC, const SDValue K) {
4420   return (isGTorGE(CC) &&
4421           ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4422          (isLTorLE(CC) &&
4423           ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4424 }
4425 
4426 // Similar to isLowerSaturate(), but checks for upper-saturating conditions.
4427 static bool isUpperSaturate(const SDValue LHS, const SDValue RHS,
4428                             const SDValue TrueVal, const SDValue FalseVal,
4429                             const ISD::CondCode CC, const SDValue K) {
4430   return (isGTorGE(CC) &&
4431           ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
4432          (isLTorLE(CC) &&
4433           ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
4434 }
4435 
// Check if two chained conditionals could be converted into SSAT or USAT.
//
// SSAT can replace a set of two conditional selectors that bound a number to an
// interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples:
//
//     x < -k ? -k : (x > k ? k : x)
//     x < -k ? -k : (x < k ? x : k)
//     x > -k ? (x > k ? k : x) : -k
//     x < k ? (x < -k ? -k : x) : k
//     etc.
//
// USAT works similarly to SSAT but bounds on the interval [0, k] where k + 1 is
// a power of 2.
//
// It returns true if the conversion can be done, false otherwise.
// Additionally, the variable is returned in parameter V, the constant in K and
// usat is set to true if the conditional represents an unsigned saturation
static bool isSaturatingConditional(const SDValue &Op, SDValue &V,
                                    uint64_t &K, bool &usat) {
  // Decompose the outer SELECT_CC.
  SDValue LHS1 = Op.getOperand(0);
  SDValue RHS1 = Op.getOperand(1);
  SDValue TrueVal1 = Op.getOperand(2);
  SDValue FalseVal1 = Op.getOperand(3);
  ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  // The inner select is on whichever arm of the outer select is not the
  // constant bound; it must itself be a SELECT_CC.
  const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
  if (Op2.getOpcode() != ISD::SELECT_CC)
    return false;

  SDValue LHS2 = Op2.getOperand(0);
  SDValue RHS2 = Op2.getOperand(1);
  SDValue TrueVal2 = Op2.getOperand(2);
  SDValue FalseVal2 = Op2.getOperand(3);
  ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();

  // Find out which are the constants and which are the variables
  // in each conditional
  SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
                                                        ? &RHS1
                                                        : nullptr;
  SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
                                                        ? &RHS2
                                                        : nullptr;
  SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
  SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
  SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
  SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;

  // We must detect cases where the original operations worked with 16- or
  // 8-bit values. In such case, V2Tmp != V2 because the comparison operations
  // must work with sign-extended values but the select operations return
  // the original non-extended value.
  SDValue V2TmpReg = V2Tmp;
  if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG)
    V2TmpReg = V2Tmp->getOperand(0);

  // Check that the registers and the constants have the correct values
  // in both conditionals
  // NOTE(review): *K1 == Op2 appears unsatisfiable here since *K1 is a
  // constant node and Op2 is a SELECT_CC — confirm intent before changing.
  if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
      V2TmpReg != V2)
    return false;

  // Figure out which conditional is saturating the lower/upper bound.
  const SDValue *LowerCheckOp =
      isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
          ? &Op
          : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
                ? &Op2
                : nullptr;
  const SDValue *UpperCheckOp =
      isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
          ? &Op
          : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
                ? &Op2
                : nullptr;

  // Exactly one lower-bound check and one distinct upper-bound check must be
  // present.
  if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
    return false;

  // Check that the constant in the lower-bound check is
  // the opposite of the constant in the upper-bound check
  // in 1's complement.
  int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
  int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
  int64_t PosVal = std::max(Val1, Val2);
  int64_t NegVal = std::min(Val1, Val2);

  // The larger constant must belong to the upper-bound check, and the upper
  // bound must be of the form 2^k - 1.
  if (((Val1 > Val2 && UpperCheckOp == &Op) ||
       (Val1 < Val2 && UpperCheckOp == &Op2)) &&
      isPowerOf2_64(PosVal + 1)) {

    // Handle the difference between USAT (unsigned) and SSAT (signed) saturation
    if (Val1 == ~Val2)
      usat = false;
    else if (NegVal == 0)
      usat = true;
    else
      return false;

    V = V2;
    K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive

    return true;
  }

  return false;
}
4543 
// Check if a condition of the type x < k ? k : x can be converted into a
// bit operation instead of conditional moves.
// Currently this is allowed given:
// - The conditions and values match up
// - k is 0 or -1 (all ones)
// This function will not check the last condition, that's up to the caller
// It returns true if the transformation can be made, and in such case
// returns x in V, and k in SatK.
static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V,
                                         SDValue &SatK)
{
  // Decompose the SELECT_CC operands.
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);

  // Locate the constant operand of the comparison, if any.
  SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS)
                                               ? &RHS
                                               : nullptr;

  // No constant operation in comparison, early out
  if (!K)
    return false;

  // KTmp / V are the constant and variable as they appear in the select arms.
  SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal;
  V = (KTmp == TrueVal) ? FalseVal : TrueVal;
  SDValue VTmp = (K && *K == LHS) ? RHS : LHS;

  // If the constant on left and right side, or variable on left and right,
  // does not match, early out
  if (*K != KTmp || V != VTmp)
    return false;

  if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) {
    SatK = *K;
    return true;
  }

  return false;
}
4585 
4586 bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const {
4587   if (VT == MVT::f32)
4588     return !Subtarget->hasVFP2Base();
4589   if (VT == MVT::f64)
4590     return !Subtarget->hasFP64();
4591   if (VT == MVT::f16)
4592     return !Subtarget->hasFullFP16();
4593   return false;
4594 }
4595 
// Lower ISD::SELECT_CC. Tries, in order: saturation patterns (SSAT/USAT),
// sign-mask bit tricks, integer CMOV (possibly VSEL-friendly), and finally
// FP compare + CMOV, with VSEL constraints applied on FPv8 targets.
SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  // Try to convert two saturating conditional selects into a single SSAT
  SDValue SatValue;
  uint64_t SatConstant;
  bool SatUSat;
  if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) &&
      isSaturatingConditional(Op, SatValue, SatConstant, SatUSat)) {
    // SatConstant is 2^k - 1 (guaranteed by isSaturatingConditional), so
    // countTrailingOnes recovers the saturation bit position k.
    if (SatUSat)
      return DAG.getNode(ARMISD::USAT, dl, VT, SatValue,
                         DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
    else
      return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue,
                         DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
  }

  // Try to convert expressions of the form x < k ? k : x (and similar forms)
  // into more efficient bit operations, which is possible when k is 0 or -1
  // On ARM and Thumb-2 which have flexible operand 2 this will result in
  // single instructions. On Thumb the shift and the bit operation will be two
  // instructions.
  // Only allow this transformation on full-width (32-bit) operations
  SDValue LowerSatConstant;
  if (VT == MVT::i32 &&
      isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) {
    // Arithmetic shift by 31 replicates the sign bit into a 0/-1 mask.
    SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue,
                                 DAG.getConstant(31, dl, VT));
    if (isNullConstant(LowerSatConstant)) {
      // max(x, 0): clear x when negative via x & ~(x >> 31).
      SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV,
                                      DAG.getAllOnesConstant(dl, VT));
      return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV);
    } else if (isAllOnesConstant(LowerSatConstant))
      // max(x, -1): force all ones when negative via x | (x >> 31).
      return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV);
  }

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);

  // Soften the FP compare into a libcall when the type has no HW support.
  if (isUnsupportedFloatingType(LHS.getValueType())) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(
        DAG, LHS.getValueType(), LHS, RHS, CC, dl);

    // If softenSetCCOperands only returned one value, we should compare it to
    // zero.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  if (LHS.getValueType() == MVT::i32) {
    // Try to generate VSEL on ARMv8.
    // The VSEL instruction can't use all the usual ARM condition
    // codes: it only has two bits to select the condition code, so it's
    // constrained to use only GE, GT, VS and EQ.
    //
    // To implement all the various ISD::SETXXX opcodes, we sometimes need to
    // swap the operands of the previous compare instruction (effectively
    // inverting the compare condition, swapping 'less' and 'greater') and
    // sometimes need to swap the operands to the VSEL (which inverts the
    // condition in the sense of firing whenever the previous condition didn't)
    if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 ||
                                        TrueVal.getValueType() == MVT::f32 ||
                                        TrueVal.getValueType() == MVT::f64)) {
      ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
      if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
          CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
        // Map the unsupported codes onto supported ones by inverting the
        // condition and swapping the select arms.
        CC = ISD::getSetCCInverse(CC, true);
        std::swap(TrueVal, FalseVal);
      }
    }

    SDValue ARMcc;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    // Choose GE over PL, which vsel does not support
    if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL)
      ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
    return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  bool InvalidOnQNaN;
  FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);

  // Normalize the fp compare. If RHS is zero we prefer to keep it there so we
  // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we
  // must use VSEL (limited condition codes), due to not having conditional f16
  // moves.
  if (Subtarget->hasFPARMv8Base() &&
      !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) &&
      (TrueVal.getValueType() == MVT::f16 ||
       TrueVal.getValueType() == MVT::f32 ||
       TrueVal.getValueType() == MVT::f64)) {
    bool swpCmpOps = false;
    bool swpVselOps = false;
    checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);

    // Only apply the swaps when the resulting code is one VSEL can encode.
    if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
        CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
      if (swpCmpOps)
        std::swap(LHS, RHS);
      if (swpVselOps)
        std::swap(TrueVal, FalseVal);
    }
  }

  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
  // Some FP condition codes need a second predicate (e.g. "one"/"ueq"): chain
  // a second CMOV over the first result.
  if (CondCode2 != ARMCC::AL) {
    SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
    // FIXME: Needs another CMP because flag can have but one use.
    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
    Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
  }
  return Result;
}
4720 
/// canChangeToInt - Given the fp compare operand, return true if it is suitable
/// to morph to an integer compare sequence. Sets SeenZero when the operand is
/// a floating-point zero (which can be compared as an integer constant).
static bool canChangeToInt(SDValue Op, bool &SeenZero,
                           const ARMSubtarget *Subtarget) {
  SDNode *N = Op.getNode();
  if (!N->hasOneUse())
    // Otherwise it requires moving the value from fp to integer registers.
    return false;
  if (!N->getNumValues())
    return false;
  EVT VT = Op.getValueType();
  if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
    // f32 case is generally profitable. f64 case only makes sense when vcmpe +
    // vmrs are very slow, e.g. cortex-a8.
    return false;

  if (isFloatingPointZero(Op)) {
    SeenZero = true;
    return true;
  }
  // A plain (non-extending, non-indexed) load can be re-issued as an integer
  // load of the same bits.
  return ISD::isNormalLoad(N);
}
4743 
4744 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
4745   if (isFloatingPointZero(Op))
4746     return DAG.getConstant(0, SDLoc(Op), MVT::i32);
4747 
4748   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
4749     return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
4750                        Ld->getPointerInfo(), Ld->getAlignment(),
4751                        Ld->getMemOperand()->getFlags());
4752 
4753   llvm_unreachable("Unknown VFP cmp argument!");
4754 }
4755 
// Split an f64 compare operand into its two i32 halves (RetVal1 = low word at
// the base address, RetVal2 = high word at offset 4) without using FP
// registers. Handles only floating-point zero and plain loads.
static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
                           SDValue &RetVal1, SDValue &RetVal2) {
  SDLoc dl(Op);

  // FP zero expands to a pair of zero constants.
  if (isFloatingPointZero(Op)) {
    RetVal1 = DAG.getConstant(0, dl, MVT::i32);
    RetVal2 = DAG.getConstant(0, dl, MVT::i32);
    return;
  }

  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
    SDValue Ptr = Ld->getBasePtr();
    // First word: i32 load from the original address.
    RetVal1 =
        DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
                    Ld->getAlignment(), Ld->getMemOperand()->getFlags());

    // Second word: i32 load from address + 4, with alignment capped at 4.
    EVT PtrType = Ptr.getValueType();
    unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
    SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
                                 PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
    RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
                          Ld->getPointerInfo().getWithOffset(4), NewAlign,
                          Ld->getMemOperand()->getFlags());
    return;
  }

  llvm_unreachable("Unknown VFP cmp argument!");
}
4784 
/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
/// f32 and even f64 comparisons to integer ones. Returns the new branch node,
/// or an empty SDValue when the pattern does not apply.
SDValue
ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  // Both operands must be convertible (zero or plain load) and at least one
  // side must be zero for the bitwise trick below to be valid.
  bool LHSSeenZero = false;
  bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
  bool RHSSeenZero = false;
  bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
  if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
    // If unsafe fp math optimization is enabled and there are no other uses of
    // the CMP operands, and the condition code is EQ or NE, we can optimize it
    // to an integer comparison.
    if (CC == ISD::SETOEQ)
      CC = ISD::SETEQ;
    else if (CC == ISD::SETUNE)
      CC = ISD::SETNE;

    // Mask off the sign bit so that +0.0 and -0.0 compare equal as integers.
    SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
    SDValue ARMcc;
    if (LHS.getValueType() == MVT::f32) {
      LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(LHS, DAG), Mask);
      RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(RHS, DAG), Mask);
      SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
      SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
      return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                         Chain, Dest, ARMcc, CCR, Cmp);
    }

    // f64: compare the two 32-bit halves, masking the sign bit off the high
    // word of each operand, and emit a 64-bit conditional branch.
    SDValue LHS1, LHS2;
    SDValue RHS1, RHS2;
    expandf64Toi32(LHS, DAG, LHS1, LHS2);
    expandf64Toi32(RHS, DAG, RHS1, RHS2);
    LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
    RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
    ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
    ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
    SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
    return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
  }

  return SDValue();
}
4837 
4838 SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
4839   SDValue Chain = Op.getOperand(0);
4840   SDValue Cond = Op.getOperand(1);
4841   SDValue Dest = Op.getOperand(2);
4842   SDLoc dl(Op);
4843 
4844   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
4845   // instruction.
4846   unsigned Opc = Cond.getOpcode();
4847   bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
4848                       !Subtarget->isThumb1Only();
4849   if (Cond.getResNo() == 1 &&
4850       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
4851        Opc == ISD::USUBO || OptimizeMul)) {
4852     // Only lower legal XALUO ops.
4853     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
4854       return SDValue();
4855 
4856     // The actual operation with overflow check.
4857     SDValue Value, OverflowCmp;
4858     SDValue ARMcc;
4859     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
4860 
4861     // Reverse the condition code.
4862     ARMCC::CondCodes CondCode =
4863         (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
4864     CondCode = ARMCC::getOppositeCondition(CondCode);
4865     ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
4866     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4867 
4868     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
4869                        OverflowCmp);
4870   }
4871 
4872   return SDValue();
4873 }
4874 
SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  // BR_CC operands: chain, condition code, LHS, RHS, destination block.
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  // FP types without hardware support are compared via a soft-float helper;
  // the branch then tests the helper's integer result.
  if (isUnsupportedFloatingType(LHS.getValueType())) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(
        DAG, LHS.getValueType(), LHS, RHS, CC, dl);

    // If softenSetCCOperands only returned one value, we should compare it to
    // zero.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
  // instruction.
  unsigned Opc = LHS.getOpcode();
  // Overflow-checked multiplies are not folded this way on Thumb1.
  bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
                      !Subtarget->isThumb1Only();
  // Match "branch on (overflow-bit ==/!= 0/1)": LHS must be result #1 (the
  // overflow flag) of an XALUO node, RHS a constant 0 or 1.
  if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO || OptimizeMul) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    // Only lower legal XALUO ops.
    if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
      return SDValue();

    // The actual operation with overflow check.
    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc);

    // The branch wants the opposite polarity of the condition getARMXALUOOp
    // produced exactly when (CC == SETNE) differs from (RHS == 1).
    if ((CC == ISD::SETNE) != isOneConstant(RHS)) {
      // Reverse the condition code.
      ARMCC::CondCodes CondCode =
          (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
      CondCode = ARMCC::getOppositeCondition(CondCode);
      ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
    }
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
                       OverflowCmp);
  }

  // Plain integer compare-and-branch: CMP + conditional branch.
  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMcc;
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                       Chain, Dest, ARMcc, CCR, Cmp);
  }

  // Under unsafe-fp-math, EQ/NE FP compares may become integer compares.
  if (getTargetMachine().Options.UnsafeFPMath &&
      (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
       CC == ISD::SETNE || CC == ISD::SETUNE)) {
    if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
      return Result;
  }

  // General FP case: VFP compare, then one conditional branch — or two when
  // the IEEE condition needs a pair of ARM condition codes (CondCode2).
  ARMCC::CondCodes CondCode, CondCode2;
  bool InvalidOnQNaN;
  FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);

  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  if (CondCode2 != ARMCC::AL) {
    // Second branch to the same destination, chained/glued to the first.
    ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
    SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  }
  return Res;
}
4958 
SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
  // BR_JT operands: chain, jump-table node, index into the table.
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PTy = getPointerTy(DAG.getDataLayout());
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
  // Each jump-table entry is 4 bytes, so scale the index accordingly.
  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index);
  if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
    // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump table
    // which does another jump to the destination. This also makes it easier
    // to translate it to TBB / TBH later (Thumb2 only).
    // FIXME: This might not work if the function is extremely large.
    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
                       Addr, Op.getOperand(2), JTI);
  }
  if (isPositionIndependent() || Subtarget->isROPI()) {
    // PIC/ROPI: the table entry is added back to the table base before
    // branching, i.e. entries are table-relative offsets.
    Addr =
        DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
                    MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
    Chain = Addr.getValue(1);
    Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
  } else {
    // Non-PIC: load the entry and branch to it directly.
    Addr =
        DAG.getLoad(PTy, dl, Chain, Addr,
                    MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
    Chain = Addr.getValue(1);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
  }
}
4994 
4995 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
4996   EVT VT = Op.getValueType();
4997   SDLoc dl(Op);
4998 
4999   if (Op.getValueType().getVectorElementType() == MVT::i32) {
5000     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
5001       return Op;
5002     return DAG.UnrollVectorOp(Op.getNode());
5003   }
5004 
5005   const bool HasFullFP16 =
5006     static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();
5007 
5008   EVT NewTy;
5009   const EVT OpTy = Op.getOperand(0).getValueType();
5010   if (OpTy == MVT::v4f32)
5011     NewTy = MVT::v4i32;
5012   else if (OpTy == MVT::v4f16 && HasFullFP16)
5013     NewTy = MVT::v4i16;
5014   else if (OpTy == MVT::v8f16 && HasFullFP16)
5015     NewTy = MVT::v8i16;
5016   else
5017     llvm_unreachable("Invalid type for custom lowering!");
5018 
5019   if (VT != MVT::v4i16 && VT != MVT::v8i16)
5020     return DAG.UnrollVectorOp(Op.getNode());
5021 
5022   Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0));
5023   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
5024 }
5025 
5026 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
5027   EVT VT = Op.getValueType();
5028   if (VT.isVector())
5029     return LowerVectorFP_TO_INT(Op, DAG);
5030   if (isUnsupportedFloatingType(Op.getOperand(0).getValueType())) {
5031     RTLIB::Libcall LC;
5032     if (Op.getOpcode() == ISD::FP_TO_SINT)
5033       LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
5034                               Op.getValueType());
5035     else
5036       LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
5037                               Op.getValueType());
5038     return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
5039                        /*isSigned*/ false, SDLoc(Op)).first;
5040   }
5041 
5042   return Op;
5043 }
5044 
5045 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
5046   EVT VT = Op.getValueType();
5047   SDLoc dl(Op);
5048 
5049   if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
5050     if (VT.getVectorElementType() == MVT::f32)
5051       return Op;
5052     return DAG.UnrollVectorOp(Op.getNode());
5053   }
5054 
5055   assert((Op.getOperand(0).getValueType() == MVT::v4i16 ||
5056           Op.getOperand(0).getValueType() == MVT::v8i16) &&
5057          "Invalid type for custom lowering!");
5058 
5059   const bool HasFullFP16 =
5060     static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();
5061 
5062   EVT DestVecType;
5063   if (VT == MVT::v4f32)
5064     DestVecType = MVT::v4i32;
5065   else if (VT == MVT::v4f16 && HasFullFP16)
5066     DestVecType = MVT::v4i16;
5067   else if (VT == MVT::v8f16 && HasFullFP16)
5068     DestVecType = MVT::v8i16;
5069   else
5070     return DAG.UnrollVectorOp(Op.getNode());
5071 
5072   unsigned CastOpc;
5073   unsigned Opc;
5074   switch (Op.getOpcode()) {
5075   default: llvm_unreachable("Invalid opcode!");
5076   case ISD::SINT_TO_FP:
5077     CastOpc = ISD::SIGN_EXTEND;
5078     Opc = ISD::SINT_TO_FP;
5079     break;
5080   case ISD::UINT_TO_FP:
5081     CastOpc = ISD::ZERO_EXTEND;
5082     Opc = ISD::UINT_TO_FP;
5083     break;
5084   }
5085 
5086   Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0));
5087   return DAG.getNode(Opc, dl, VT, Op);
5088 }
5089 
5090 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
5091   EVT VT = Op.getValueType();
5092   if (VT.isVector())
5093     return LowerVectorINT_TO_FP(Op, DAG);
5094   if (isUnsupportedFloatingType(VT)) {
5095     RTLIB::Libcall LC;
5096     if (Op.getOpcode() == ISD::SINT_TO_FP)
5097       LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
5098                               Op.getValueType());
5099     else
5100       LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
5101                               Op.getValueType());
5102     return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
5103                        /*isSigned*/ false, SDLoc(Op)).first;
5104   }
5105 
5106   return Op;
5107 }
5108 
SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  // Lower fcopysign(Tmp0, Tmp1): result takes Tmp0's magnitude bits and
  // Tmp1's sign bit. Two strategies: NEON bit masking when the values live
  // on the vector bank, otherwise integer bit operations on the GPR bank.
  SDValue Tmp0 = Op.getOperand(0);
  SDValue Tmp1 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT SrcVT = Tmp1.getValueType();
  // If Tmp0 already came through the integer bank (bitcast / VMOVDRR), the
  // NEON path would only add cross-bank moves, so skip it.
  bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
    Tmp0.getOpcode() == ARMISD::VMOVDRR;
  bool UseNEON = !InGPR && Subtarget->hasNEON();

  if (UseNEON) {
    // Use VBSL to copy the sign bit.
    // Per-lane sign-bit mask built from a NEON modified immediate
    // (cmode 0x6, value 0x80 — presumably 0x80000000 per i32 lane; see
    // ARM_AM::createNEONModImm).
    unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
    SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
                               DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
    EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
    // f64 result: shift the mask bit up by 32 so it covers the f64 sign bit.
    // f32 result: splat Tmp0 into a vector so the lane ops below apply.
    if (VT == MVT::f64)
      Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
                         DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
                         DAG.getConstant(32, dl, MVT::i32));
    else /*if (VT == MVT::f32)*/
      Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
    if (SrcVT == MVT::f32) {
      // f32 sign source: vectorize, and when producing f64 shift its sign
      // bit up by 32 to line up with the mask above.
      Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
      if (VT == MVT::f64)
        Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
                           DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
                           DAG.getConstant(32, dl, MVT::i32));
    } else if (VT == MVT::f32)
      // f64 sign source, f32 result: shift the f64 bits right by 32 so the
      // sign bit lands in bit 31.
      Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
                         DAG.getConstant(32, dl, MVT::i32));
    Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);

    // MaskNot = ~Mask, built by XORing with an all-ones vector immediate.
    SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
                                            dl, MVT::i32);
    AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
    SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
                                  DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));

    // Res = (Tmp1 & Mask) | (Tmp0 & ~Mask): sign from Tmp1, rest from Tmp0.
    SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
    // Extract the scalar result back out of the vector.
    if (VT == MVT::f32) {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
                        DAG.getConstant(0, dl, MVT::i32));
    } else {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
    }

    return Res;
  }

  // GPR path.
  // Bitcast operand 1 to i32.
  if (SrcVT == MVT::f64)
    // f64 sign source: keep only the second VMOVRRD result (presumably the
    // sign-carrying high word — little-endian word order).
    Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                       Tmp1).getValue(1);
  Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);

  // Or in the signbit with integer operations.
  SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32); // sign bit
  SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32); // magnitude bits
  Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
  if (VT == MVT::f32) {
    Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
                       DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                       DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
  }

  // f64: Or the high part with signbit and then combine two parts.
  Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                     Tmp0);
  SDValue Lo = Tmp0.getValue(0);
  SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
  Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
}
5190 
5191 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
5192   MachineFunction &MF = DAG.getMachineFunction();
5193   MachineFrameInfo &MFI = MF.getFrameInfo();
5194   MFI.setReturnAddressIsTaken(true);
5195 
5196   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
5197     return SDValue();
5198 
5199   EVT VT = Op.getValueType();
5200   SDLoc dl(Op);
5201   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5202   if (Depth) {
5203     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
5204     SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
5205     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
5206                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
5207                        MachinePointerInfo());
5208   }
5209 
5210   // Return LR, which contains the return address. Mark it an implicit live-in.
5211   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
5212   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
5213 }
5214 
5215 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
5216   const ARMBaseRegisterInfo &ARI =
5217     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
5218   MachineFunction &MF = DAG.getMachineFunction();
5219   MachineFrameInfo &MFI = MF.getFrameInfo();
5220   MFI.setFrameAddressIsTaken(true);
5221 
5222   EVT VT = Op.getValueType();
5223   SDLoc dl(Op);  // FIXME probably not meaningful
5224   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5225   unsigned FrameReg = ARI.getFrameRegister(MF);
5226   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
5227   while (Depth--)
5228     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
5229                             MachinePointerInfo());
5230   return FrameAddr;
5231 }
5232 
5233 // FIXME? Maybe this could be a TableGen attribute on some registers and
5234 // this table could be generated automatically from RegInfo.
5235 unsigned ARMTargetLowering::getRegisterByName(const char* RegName, EVT VT,
5236                                               SelectionDAG &DAG) const {
5237   unsigned Reg = StringSwitch<unsigned>(RegName)
5238                        .Case("sp", ARM::SP)
5239                        .Default(0);
5240   if (Reg)
5241     return Reg;
5242   report_fatal_error(Twine("Invalid register name \""
5243                               + StringRef(RegName)  + "\"."));
5244 }
5245 
// Result is 64 bit value so split into two 32 bit values and return as a
// pair of values.
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                SelectionDAG &DAG) {
  SDLoc DL(N);

  // This function is only supposed to be called for i64 type destination.
  assert(N->getValueType(0) == MVT::i64
          && "ExpandREAD_REGISTER called for non-i64 type result.");

  // Re-emit the READ_REGISTER producing two i32 results plus a chain;
  // operand 0 is the incoming chain, operand 1 the register identifier.
  SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
                             DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
                             N->getOperand(0),
                             N->getOperand(1));

  // Glue the two halves back together as the i64 result.
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
                    Read.getValue(1)));
  // NOTE(review): this pushes Read.getOperand(0) (the *incoming* chain) as
  // the chain result rather than Read.getValue(2) (the new node's chain
  // output). Matches upstream, but worth confirming it is intentional.
  Results.push_back(Read.getOperand(0));
}
5265 
/// \p BC is a bitcast that is about to be turned into a VMOVDRR.
/// When \p DstVT, the destination type of \p BC, is on the vector
/// register bank and the source of bitcast, \p Op, operates on the same bank,
/// it might be possible to combine them, such that everything stays on the
/// vector register bank.
/// \p return The node that would replace \p BC, if the combine
/// is possible.
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
                                                SelectionDAG &DAG) {
  SDValue Op = BC->getOperand(0);
  EVT DstVT = BC->getValueType(0);

  // The only vector instruction that can produce a scalar (remember,
  // since the bitcast was about to be turned into VMOVDRR, the source
  // type is i64) from a vector is EXTRACT_VECTOR_ELT.
  // Moreover, we can do this combine only if there is one use.
  // Finally, if the destination type is not a vector, there is not
  // much point on forcing everything on the vector bank.
  if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !Op.hasOneUse())
    return SDValue();

  // If the index is not constant, we will introduce an additional
  // multiply that will stick.
  // Give up in that case.
  ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!Index)
    return SDValue();
  unsigned DstNumElt = DstVT.getVectorNumElements();

  // Compute the new index: each i64 element of the source covers DstNumElt
  // elements of DstVT, so the subvector starts at old-index * DstNumElt.
  const APInt &APIntIndex = Index->getAPIntValue();
  APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
  NewIndex *= APIntIndex;
  // Check if the new constant index fits into i32.
  // NOTE(review): this tests the APInt's bit width, not the value's
  // magnitude — confirm that is the intended guard.
  if (NewIndex.getBitWidth() > 32)
    return SDValue();

  // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
  // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
  SDLoc dl(Op);
  SDValue ExtractSrc = Op.getOperand(0);
  // Re-view the whole source vector with DstVT's (narrower) element type.
  EVT VecVT = EVT::getVectorVT(
      *DAG.getContext(), DstVT.getScalarType(),
      ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
  SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
                     DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
}
5315 
/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
/// It also handles several f16/i16-related bitcast patterns for FullFP16.
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
                             const ARMSubtarget *Subtarget) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);

  // This function is only supposed to be called for i64 types, either as the
  // source or destination of the bit convert.
  EVT SrcVT = Op.getValueType();
  EVT DstVT = N->getValueType(0);
  const bool HasFullFP16 = Subtarget->hasFullFP16();

  if (SrcVT == MVT::f32 && DstVT == MVT::i32) {
     // FullFP16: half values are passed in S-registers, and we don't
     // need any of the bitcast and moves:
     //
     // t2: f32,ch = CopyFromReg t0, Register:f32 %0
     //   t5: i32 = bitcast t2
     // t18: f16 = ARMISD::VMOVhr t5
     if (Op.getOpcode() != ISD::CopyFromReg ||
         Op.getValueType() != MVT::f32)
       return SDValue();

     // Only fold when the bitcast feeds a VMOVhr.
     // NOTE(review): only the first use is inspected — assumes a single use.
     auto Move = N->use_begin();
     if (Move->getOpcode() != ARMISD::VMOVhr)
       return SDValue();

     // Re-emit the CopyFromReg as f16 and substitute it for the VMOVhr.
     SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
     SDValue Copy = DAG.getNode(ISD::CopyFromReg, SDLoc(Op), MVT::f16, Ops);
     DAG.ReplaceAllUsesWith(*Move, &Copy);
     return Copy;
  }

  if (SrcVT == MVT::i16 && DstVT == MVT::f16) {
    if (!HasFullFP16)
      return SDValue();
    // SoftFP: read half-precision arguments:
    //
    // t2: i32,ch = ...
    //        t7: i16 = truncate t2 <~~~~ Op
    //      t8: f16 = bitcast t7    <~~~~ N
    //
    // Move the i32 straight into an f16 register, skipping the truncate.
    if (Op.getOperand(0).getValueType() == MVT::i32)
      return DAG.getNode(ARMISD::VMOVhr, SDLoc(Op),
                         MVT::f16, Op.getOperand(0));

    return SDValue();
  }

  // Half-precision return values
  if (SrcVT == MVT::f16 && DstVT == MVT::i16) {
    if (!HasFullFP16)
      return SDValue();
    //
    //          t11: f16 = fadd t8, t10
    //        t12: i16 = bitcast t11       <~~~ SDNode N
    //      t13: i32 = zero_extend t12
    //    t16: ch,glue = CopyToReg t0, Register:i32 %r0, t13
    //  t17: ch = ARMISD::RET_FLAG t16, Register:i32 %r0, t16:1
    //
    // transform this into:
    //
    //    t20: i32 = ARMISD::VMOVrh t11
    //  t16: ch,glue = CopyToReg t0, Register:i32 %r0, t20
    //
    auto ZeroExtend = N->use_begin();
    if (N->use_size() != 1 || ZeroExtend->getOpcode() != ISD::ZERO_EXTEND ||
        ZeroExtend->getValueType(0) != MVT::i32)
      return SDValue();

    // Only fire for the CopyToReg-feeding-RET_FLAG shape shown above.
    auto Copy = ZeroExtend->use_begin();
    if (Copy->getOpcode() == ISD::CopyToReg &&
        Copy->use_begin()->getOpcode() == ARMISD::RET_FLAG) {
      SDValue Cvt = DAG.getNode(ARMISD::VMOVrh, SDLoc(Op), MVT::i32, Op);
      DAG.ReplaceAllUsesWith(*ZeroExtend, &Cvt);
      return Cvt;
    }
    return SDValue();
  }

  if (!(SrcVT == MVT::i64 || DstVT == MVT::i64))
    return SDValue();

  // Turn i64->f64 into VMOVDRR.
  if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
    // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
    // if we can combine the bitcast with its source.
    if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
      return Val;

    // Split the i64 into its two words and rebuild as f64 via VMOVDRR.
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(0, dl, MVT::i32));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(1, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, DstVT,
                       DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
  }

  // Turn f64->i64 into VMOVRRD.
  if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
    SDValue Cvt;
    // Big-endian multi-element vectors need a VREV64 first so the two GPR
    // halves come out in the expected order.
    if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
        SrcVT.getVectorNumElements() > 1)
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32),
                        DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
    else
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32), Op);
    // Merge the pieces into a single i64 value.
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
  }

  return SDValue();
}
5436 
5437 /// getZeroVector - Returns a vector of specified type with all zero elements.
5438 /// Zero vectors are used to represent vector negation and in those cases
5439 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
5440 /// not support i64 elements, so sometimes the zero vectors will need to be
5441 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
5442 /// zero vector.
5443 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5444   assert(VT.isVector() && "Expected a vector type");
5445   // The canonical modified immediate encoding of a zero vector is....0!
5446   SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
5447   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
5448   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
5449   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
5450 }
5451 
/// LowerShiftRightParts - Lower SRA_PARTS / SRL_PARTS, which return two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue ARMcc;
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  // SRA shifts in sign bits (arithmetic); SRL shifts in zeros (logical).
  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;

  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);

  // RevShAmt = VTBits - ShAmt; ExtraShAmt = ShAmt - VTBits. ExtraShAmt >= 0
  // exactly when the shift amount is VTBits (32) or more.
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
  // ShAmt < 32: Lo = (ShOpLo >> ShAmt) | (ShOpHi << (32 - ShAmt)).
  SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  // ShAmt >= 32: Lo = ShOpHi >> (ShAmt - 32), arithmetic or logical.
  SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
  // Select between the two with a conditional move on ExtraShAmt >= 0
  // (assumes ARMISD::CMOV yields its second value operand when the
  // condition holds — matches usage elsewhere in this file).
  SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
                           ARMcc, CCR, CmpLo);

  // Hi half: plain shift for ShAmt < 32; for >= 32 it is all sign bits
  // (SRA by 31) or zero (SRL).
  SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
  SDValue HiBigShift = Opc == ISD::SRA
                           ? DAG.getNode(Opc, dl, VT, ShOpHi,
                                         DAG.getConstant(VTBits - 1, dl, VT))
                           : DAG.getConstant(0, dl, VT);
  SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
                           ARMcc, CCR, CmpHi);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}
5495 
/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue ARMcc;
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

  assert(Op.getOpcode() == ISD::SHL_PARTS);
  // ShAmt < 32: Hi = (ShOpHi << ShAmt) | (ShOpLo >> (32 - ShAmt)).
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
  SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);

  // ShAmt >= 32: Hi = ShOpLo << (ShAmt - 32), selected when ExtraShAmt >= 0
  // (assumes ARMISD::CMOV yields its second value operand when the
  // condition holds — matches usage elsewhere in this file).
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
  SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
                           ARMcc, CCR, CmpHi);

  // Lo half: ShOpLo << ShAmt for ShAmt < 32, zero once everything has been
  // shifted out (ShAmt >= 32).
  SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                          ISD::SETGE, ARMcc, DAG, dl);
  SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
                           DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}
5534 
5535 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
5536                                             SelectionDAG &DAG) const {
5537   // The rounding mode is in bits 23:22 of the FPSCR.
5538   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
5539   // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
5540   // so that the shift + and get folded into a bitfield extract.
5541   SDLoc dl(Op);
5542   SDValue Ops[] = { DAG.getEntryNode(),
5543                     DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32) };
5544 
5545   SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, MVT::i32, Ops);
5546   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
5547                                   DAG.getConstant(1U << 22, dl, MVT::i32));
5548   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
5549                               DAG.getConstant(22, dl, MVT::i32));
5550   return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
5551                      DAG.getConstant(3, dl, MVT::i32));
5552 }
5553 
/// LowerCTTZ - Custom lowering for CTTZ / CTTZ_ZERO_UNDEF.
/// Vector counts are computed on NEON via cttz(x) = ctpop(lsb(x) - 1)
/// (or via CTLZ for the ZERO_UNDEF flavour on i16/i32 elements); scalar
/// counts use bit-reverse followed by CTLZ, available from v6T2 onwards.
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  if (VT.isVector()) {
    assert(ST->hasNEON());

    // Compute the least significant set bit: LSB = X & -X
    SDValue X = N->getOperand(0);
    SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
    SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);

    EVT ElemTy = VT.getVectorElementType();

    if (ElemTy == MVT::i8) {
      // Compute with: cttz(x) = ctpop(lsb - 1)
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
      return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
    }

    if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
        (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
      // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
      unsigned NumBits = ElemTy.getSizeInBits();
      SDValue WidthMinus1 =
          DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                      DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
      SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
      return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
    }

    // Compute with: cttz(x) = ctpop(lsb - 1)

    // Compute LSB - 1.
    SDValue Bits;
    if (ElemTy == MVT::i64) {
      // Load constant 0xffff'ffff'ffff'ffff to register.
      // 0x1eff is the NEON modified-immediate encoding (Op=1, Cmode=1110,
      // Imm=0xff — see isNEONModifiedImm's 64-bit case) that splats all-ones
      // into each i64 lane; adding all-ones is the same as subtracting 1.
      SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                               DAG.getTargetConstant(0x1eff, dl, MVT::i32));
      Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
    } else {
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
    }
    return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
  }

  // Scalar path: requires RBIT/CLZ, which appear in v6T2.
  if (!ST->hasV6T2Ops())
    return SDValue();

  // cttz(x) == ctlz(bitreverse(x)).
  SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
  return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
}
5610 
5611 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
5612                           const ARMSubtarget *ST) {
5613   EVT VT = N->getValueType(0);
5614   SDLoc DL(N);
5615 
5616   assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
5617   assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
5618           VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
5619          "Unexpected type for custom ctpop lowering");
5620 
5621   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5622   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
5623   SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0));
5624   Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res);
5625 
5626   // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
5627   unsigned EltSize = 8;
5628   unsigned NumElts = VT.is64BitVector() ? 8 : 16;
5629   while (EltSize != VT.getScalarSizeInBits()) {
5630     SmallVector<SDValue, 8> Ops;
5631     Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL,
5632                                   TLI.getPointerTy(DAG.getDataLayout())));
5633     Ops.push_back(Res);
5634 
5635     EltSize *= 2;
5636     NumElts /= 2;
5637     MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
5638     Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops);
5639   }
5640 
5641   return Res;
5642 }
5643 
5644 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
5645                           const ARMSubtarget *ST) {
5646   EVT VT = N->getValueType(0);
5647   SDLoc dl(N);
5648 
5649   if (!VT.isVector())
5650     return SDValue();
5651 
5652   // Lower vector shifts on NEON to use VSHL.
5653   assert(ST->hasNEON() && "unexpected vector shift");
5654 
5655   // Left shifts translate directly to the vshiftu intrinsic.
5656   if (N->getOpcode() == ISD::SHL)
5657     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5658                        DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl,
5659                                        MVT::i32),
5660                        N->getOperand(0), N->getOperand(1));
5661 
5662   assert((N->getOpcode() == ISD::SRA ||
5663           N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
5664 
5665   // NEON uses the same intrinsics for both left and right shifts.  For
5666   // right shifts, the shift amounts are negative, so negate the vector of
5667   // shift amounts.
5668   EVT ShiftVT = N->getOperand(1).getValueType();
5669   SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
5670                                      getZeroVector(ShiftVT, DAG, dl),
5671                                      N->getOperand(1));
5672   Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
5673                              Intrinsic::arm_neon_vshifts :
5674                              Intrinsic::arm_neon_vshiftu);
5675   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5676                      DAG.getConstant(vshiftInt, dl, MVT::i32),
5677                      N->getOperand(0), NegatedCount);
5678 }
5679 
/// Expand64BitShift - Custom expansion for 64-bit scalar shifts.
/// With MVE, shifts whose amount fits in 32 bits are lowered onto the
/// long-shift nodes (LSLL/LSRL/ASRL) operating on the two i32 halves.
/// Without MVE, only SRA/SRL by exactly 1 are handled here (as a
/// shift-with-flag plus RRX pair); all other cases fall back to the
/// generic expansion by returning an empty SDValue.
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
                                const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // We can get here for a node like i32 = ISD::SHL i32, i64
  if (VT != MVT::i64)
    return SDValue();

  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA ||
          N->getOpcode() == ISD::SHL) &&
         "Unknown shift to lower!");

  unsigned ShOpc = N->getOpcode();
  if (ST->hasMVEIntegerOps()) {
    SDValue ShAmt = N->getOperand(1);
    unsigned ShPartsOpc = ARMISD::LSLL;
    ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt);

    // If the shift amount is greater than 32 then do the default optimisation
    if (Con && Con->getZExtValue() > 32)
      return SDValue();

    // Extract the lower 32 bits of the shift amount if it's an i64
    if (ShAmt->getValueType(0) == MVT::i64)
      ShAmt = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, ShAmt,
                          DAG.getConstant(0, dl, MVT::i32));

    if (ShOpc == ISD::SRL) {
      if (!Con)
        // There is no t2LSRLr instruction so negate and perform an lsll if the
        // shift amount is in a register, emulating a right shift.
        ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                            DAG.getConstant(0, dl, MVT::i32), ShAmt);
      else
        // Else generate an lsrl on the immediate shift amount
        ShPartsOpc = ARMISD::LSRL;
    } else if (ShOpc == ISD::SRA)
      ShPartsOpc = ARMISD::ASRL;

    // Lower 32 bits of the destination/source
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
    // Upper 32 bits of the destination/source
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));

    // Generate the shift operation as computed above
    Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi,
                     ShAmt);
    // The upper 32 bits come from the second return value of lsll
    Hi = SDValue(Lo.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }

  // We only lower SRA, SRL of 1 here, all others use generic lowering.
  if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL)
    return SDValue();

  // If we are in thumb mode, we don't have RRX.
  if (ST->isThumb1Only())
    return SDValue();

  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(0, dl, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(1, dl, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
5760 
/// LowerVSETCC - Lower a vector comparison to the ARM-specific NEON compare
/// nodes (VCEQ/VCGE/VCGT and the unsigned and compare-against-zero
/// variants).  Condition codes that have no direct instruction are built by
/// swapping operands, OR-ing two compares, and/or inverting the result.
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
  SDValue TmpOp0, TmpOp1;
  bool Invert = false;  // Bitwise-NOT the result at the end.
  bool Swap = false;    // Swap Op0/Op1 before emitting the compare.
  unsigned Opc = 0;

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  // Compares yield an all-ones/all-zeros integer vector of the same shape.
  EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  SDLoc dl(Op);

  if (Op0.getValueType().getVectorElementType() == MVT::i64 &&
      (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) {
    // Special-case integer 64-bit equality comparisons. They aren't legal,
    // but they can be lowered with a few vector instructions.
    unsigned CmpElements = CmpVT.getVectorNumElements() * 2;
    EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements);
    SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0);
    SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1);
    SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1,
                              DAG.getCondCode(ISD::SETEQ));
    // AND each 32-bit compare result with its VREV64-swapped partner: a
    // 64-bit lane is all-ones only if both of its halves compared equal.
    SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp);
    SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed);
    Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged);
    if (SetCCOpcode == ISD::SETNE)
      Merged = DAG.getNOT(dl, Merged, CmpVT);
    Merged = DAG.getSExtOrTrunc(Merged, dl, VT);
    return Merged;
  }

  if (CmpVT.getVectorElementType() == MVT::i64)
    // 64-bit comparisons are not legal in general.
    return SDValue();

  if (Op1.getValueType().isFloatingPoint()) {
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal FP comparison");
    case ISD::SETUNE:
    case ISD::SETNE:  Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETOEQ:
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETOLT:
    case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETOGT:
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETOLE:
    case ISD::SETLE:  Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETOGE:
    case ISD::SETGE: Opc = ARMISD::VCGE; break;
    case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
    case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
    case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETONE:
      // Expand this to (OLT | OGT).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
      break;
    case ISD::SETUO:
      Invert = true;
      LLVM_FALLTHROUGH;
    case ISD::SETO:
      // Expand this to (OLT | OGE).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1);
      break;
    }
  } else {
    // Integer comparisons.
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal integer comparison");
    case ISD::SETNE:  Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETLT:  Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETLE:  Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
    case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
    case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
    }

    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
    if (Opc == ARMISD::VCEQ) {
      SDValue AndOp;
      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
        AndOp = Op0;
      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
        AndOp = Op1;

      // Ignore bitconvert.
      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
        AndOp = AndOp.getOperand(0);

      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
        Opc = ARMISD::VTST;
        Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
        Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
        // VTST tests (a & b) != 0, while VCEQ-against-zero tests == 0, so
        // flip the inversion flag.
        Invert = !Invert;
      }
    }
  }

  // Apply the operand swap decided by the condition-code tables above.
  if (Swap)
    std::swap(Op0, Op1);

  // If one of the operands is a constant vector zero, attempt to fold the
  // comparison to a specialized compare-against-zero form.
  SDValue SingleOp;
  if (ISD::isBuildVectorAllZeros(Op1.getNode()))
    SingleOp = Op0;
  else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
    // Zero on the left: mirror the comparison direction.
    if (Opc == ARMISD::VCGE)
      Opc = ARMISD::VCLEZ;
    else if (Opc == ARMISD::VCGT)
      Opc = ARMISD::VCLTZ;
    SingleOp = Op1;
  }

  SDValue Result;
  if (SingleOp.getNode()) {
    switch (Opc) {
    case ARMISD::VCEQ:
      Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break;
    case ARMISD::VCGE:
      Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break;
    case ARMISD::VCLEZ:
      Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break;
    case ARMISD::VCGT:
      Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break;
    case ARMISD::VCLTZ:
      Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break;
    default:
      // No against-zero form exists (e.g. VTST, unsigned compares, OR of
      // two compares): emit the two-operand node.
      Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
    }
  } else {
     Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
  }

  Result = DAG.getSExtOrTrunc(Result, dl, VT);

  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}
5918 
5919 static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) {
5920   SDValue LHS = Op.getOperand(0);
5921   SDValue RHS = Op.getOperand(1);
5922   SDValue Carry = Op.getOperand(2);
5923   SDValue Cond = Op.getOperand(3);
5924   SDLoc DL(Op);
5925 
5926   assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
5927 
5928   // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we
5929   // have to invert the carry first.
5930   Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
5931                       DAG.getConstant(1, DL, MVT::i32), Carry);
5932   // This converts the boolean value carry into the carry flag.
5933   Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
5934 
5935   SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
5936   SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);
5937 
5938   SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
5939   SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
5940   SDValue ARMcc = DAG.getConstant(
5941       IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
5942   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5943   SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
5944                                    Cmp.getValue(1), SDValue());
5945   return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
5946                      CCR, Chain.getValue(1));
5947 }
5948 
/// isNEONModifiedImm - Check if the specified splat value corresponds to a
/// valid vector constant for a NEON or MVE instruction with a "modified immediate"
/// operand (e.g., VMOV).  If so, return the encoded value.
/// On success the returned target constant is the cmode/op/imm encoding
/// from ARM_AM::createNEONModImm, and \p VT is set to the vector type the
/// immediate should be materialized with.  \p type restricts which cmode
/// encodings are acceptable for the consuming instruction class.
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
                                 unsigned SplatBitSize, SelectionDAG &DAG,
                                 const SDLoc &dl, EVT &VT, bool is128Bits,
                                 NEONModImmType type) {
  unsigned OpCmode, Imm;

  // SplatBitSize is set to the smallest size that splats the vector, so a
  // zero vector will always have SplatBitSize == 8.  However, NEON modified
  // immediate instructions other than VMOV do not support the 8-bit encoding
  // of a zero vector, and the default encoding of zero is supposed to be the
  // 32-bit version.
  if (SplatBits == 0)
    SplatBitSize = 32;

  switch (SplatBitSize) {
  case 8:
    if (type != VMOVModImm)
      return SDValue();
    // Any 1-byte value is OK.  Op=0, Cmode=1110.
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    OpCmode = 0xe;
    Imm = SplatBits;
    VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
    break;

  case 16:
    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
    VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x00nn: Op=x, Cmode=100x.
      OpCmode = 0x8;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0xnn00: Op=x, Cmode=101x.
      OpCmode = 0xa;
      Imm = SplatBits >> 8;
      break;
    }
    return SDValue();

  case 32:
    // NEON's 32-bit VMOV supports splat values where:
    // * only one byte is nonzero, or
    // * the least significant byte is 0xff and the second byte is nonzero, or
    // * the least significant 2 bytes are 0xff and the third is nonzero.
    VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x000000nn: Op=x, Cmode=000x.
      OpCmode = 0;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0x0000nn00: Op=x, Cmode=001x.
      OpCmode = 0x2;
      Imm = SplatBits >> 8;
      break;
    }
    if ((SplatBits & ~0xff0000) == 0) {
      // Value = 0x00nn0000: Op=x, Cmode=010x.
      OpCmode = 0x4;
      Imm = SplatBits >> 16;
      break;
    }
    if ((SplatBits & ~0xff000000) == 0) {
      // Value = 0xnn000000: Op=x, Cmode=011x.
      OpCmode = 0x6;
      Imm = SplatBits >> 24;
      break;
    }

    // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
    if (type == OtherModImm) return SDValue();

    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff) {
      // Value = 0x0000nnff: Op=x, Cmode=1100.
      OpCmode = 0xc;
      Imm = SplatBits >> 8;
      break;
    }

    // cmode == 0b1101 is not supported for MVE VMVN
    if (type == MVEVMVNModImm)
      return SDValue();

    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
      // Value = 0x00nnffff: Op=x, Cmode=1101.
      OpCmode = 0xd;
      Imm = SplatBits >> 16;
      break;
    }

    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
    // VMOV.I32.  A (very) minor optimization would be to replicate the value
    // and fall through here to test for a valid 64-bit splat.  But, then the
    // caller would also need to check and handle the change in size.
    return SDValue();

  case 64: {
    if (type != VMOVModImm)
      return SDValue();
    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
    // Each of the 8 immediate bits selects whether the corresponding byte
    // of the splat is 0x00 or 0xff; undef bytes are treated as 0xff when
    // the defined bits allow it.
    uint64_t BitMask = 0xff;
    uint64_t Val = 0;
    unsigned ImmMask = 1;
    Imm = 0;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
        Val |= BitMask;
        Imm |= ImmMask;
      } else if ((SplatBits & BitMask) != 0) {
        // A byte that is neither all-ones nor all-zeros cannot be encoded.
        return SDValue();
      }
      BitMask <<= 8;
      ImmMask <<= 1;
    }

    if (DAG.getDataLayout().isBigEndian())
      // swap higher and lower 32 bit word
      Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);

    // Op=1, Cmode=1110.
    OpCmode = 0x1e;
    VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
    break;
  }

  default:
    llvm_unreachable("unexpected size for isNEONModifiedImm");
  }

  unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
  return DAG.getTargetConstant(EncodedVal, dl, MVT::i32);
}
6091 
/// LowerConstantFP - Custom lowering for f32/f64 ConstantFP nodes.
/// Tries, in order: integer materialization for execute-only code, a VFP
/// VMOV immediate, a NEON VMOV.i32 splat, and a NEON VMVN.i32 splat.
/// Returns an empty SDValue to fall back to the default (constant pool)
/// lowering when none of these apply.
SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                                           const ARMSubtarget *ST) const {
  EVT VT = Op.getValueType();
  bool IsDouble = (VT == MVT::f64);
  ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
  const APFloat &FPVal = CFP->getValueAPF();

  // Prevent floating-point constants from using literal loads
  // when execute-only is enabled.
  if (ST->genExecuteOnly()) {
    // If we can represent the constant as an immediate, don't lower it
    if (isFPImmLegal(FPVal, VT))
      return Op;
    // Otherwise, construct as integer, and move to float register
    APInt INTVal = FPVal.bitcastToAPInt();
    SDLoc DL(CFP);
    switch (VT.getSimpleVT().SimpleTy) {
      default:
        llvm_unreachable("Unknown floating point type!");
        break;
      case MVT::f64: {
        // Build the double from two i32 halves; the half order depends on
        // target endianness.
        SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32);
        SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32);
        if (!ST->isLittle())
          std::swap(Lo, Hi);
        return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi);
      }
      case MVT::f32:
          return DAG.getNode(ARMISD::VMOVSR, DL, VT,
              DAG.getConstant(INTVal, DL, MVT::i32));
    }
  }

  if (!ST->hasVFP3Base())
    return SDValue();

  // Use the default (constant pool) lowering for double constants when we have
  // an SP-only FPU
  if (IsDouble && !Subtarget->hasFP64())
    return SDValue();

  // Try splatting with a VMOV.f32...
  int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);

  if (ImmVal != -1) {
    if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
      // We have code in place to select a valid ConstantFP already, no need to
      // do any mangling.
      return Op;
    }

    // It's a float and we are trying to use NEON operations where
    // possible. Lower it to a splat followed by an extract.
    SDLoc DL(Op);
    SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32);
    SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
                                      NewVal);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
                       DAG.getConstant(0, DL, MVT::i32));
  }

  // The rest of our options are NEON only, make sure that's allowed before
  // proceeding..
  if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
    return SDValue();

  EVT VMovVT;
  uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();

  // It wouldn't really be worth bothering for doubles except for one very
  // important value, which does happen to match: 0.0. So make sure we don't do
  // anything stupid.
  // (The NEON splat below replicates a 32-bit pattern, so a double is only
  // representable when both of its 32-bit halves are identical.)
  if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
    return SDValue();

  // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
  SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op),
                                     VMovVT, false, VMOVModImm);
  if (NewVal != SDValue()) {
    SDLoc DL(Op);
    SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
                                      NewVal);
    if (IsDouble)
      return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);

    // It's a float: cast and extract a vector element.
    SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
                                       VecConstant);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
                       DAG.getConstant(0, DL, MVT::i32));
  }

  // Finally, try a VMVN.i32
  NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
                             false, VMVNModImm);
  if (NewVal != SDValue()) {
    SDLoc DL(Op);
    SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);

    if (IsDouble)
      return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);

    // It's a float: cast and extract a vector element.
    SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
                                       VecConstant);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
                       DAG.getConstant(0, DL, MVT::i32));
  }

  return SDValue();
}
6203 
6204 // check if an VEXT instruction can handle the shuffle mask when the
6205 // vector sources of the shuffle are the same.
6206 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
6207   unsigned NumElts = VT.getVectorNumElements();
6208 
6209   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
6210   if (M[0] < 0)
6211     return false;
6212 
6213   Imm = M[0];
6214 
6215   // If this is a VEXT shuffle, the immediate value is the index of the first
6216   // element.  The other shuffle indices must be the successive elements after
6217   // the first one.
6218   unsigned ExpectedElt = Imm;
6219   for (unsigned i = 1; i < NumElts; ++i) {
6220     // Increment the expected index.  If it wraps around, just follow it
6221     // back to index zero and keep going.
6222     ++ExpectedElt;
6223     if (ExpectedElt == NumElts)
6224       ExpectedElt = 0;
6225 
6226     if (M[i] < 0) continue; // ignore UNDEF indices
6227     if (ExpectedElt != static_cast<unsigned>(M[i]))
6228       return false;
6229   }
6230 
6231   return true;
6232 }
6233 
6234 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
6235                        bool &ReverseVEXT, unsigned &Imm) {
6236   unsigned NumElts = VT.getVectorNumElements();
6237   ReverseVEXT = false;
6238 
6239   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
6240   if (M[0] < 0)
6241     return false;
6242 
6243   Imm = M[0];
6244 
6245   // If this is a VEXT shuffle, the immediate value is the index of the first
6246   // element.  The other shuffle indices must be the successive elements after
6247   // the first one.
6248   unsigned ExpectedElt = Imm;
6249   for (unsigned i = 1; i < NumElts; ++i) {
6250     // Increment the expected index.  If it wraps around, it may still be
6251     // a VEXT but the source vectors must be swapped.
6252     ExpectedElt += 1;
6253     if (ExpectedElt == NumElts * 2) {
6254       ExpectedElt = 0;
6255       ReverseVEXT = true;
6256     }
6257 
6258     if (M[i] < 0) continue; // ignore UNDEF indices
6259     if (ExpectedElt != static_cast<unsigned>(M[i]))
6260       return false;
6261   }
6262 
6263   // Adjust the index value if the source operands will be swapped.
6264   if (ReverseVEXT)
6265     Imm -= NumElts;
6266 
6267   return true;
6268 }
6269 
6270 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
6271 /// instruction with the specified blocksize.  (The order of the elements
6272 /// within each block of the vector is reversed.)
6273 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
6274   assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
6275          "Only possible block sizes for VREV are: 16, 32, 64");
6276 
6277   unsigned EltSz = VT.getScalarSizeInBits();
6278   if (EltSz == 64)
6279     return false;
6280 
6281   unsigned NumElts = VT.getVectorNumElements();
6282   unsigned BlockElts = M[0] + 1;
6283   // If the first shuffle index is UNDEF, be optimistic.
6284   if (M[0] < 0)
6285     BlockElts = BlockSize / EltSz;
6286 
6287   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
6288     return false;
6289 
6290   for (unsigned i = 0; i < NumElts; ++i) {
6291     if (M[i] < 0) continue; // ignore UNDEF indices
6292     if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
6293       return false;
6294   }
6295 
6296   return true;
6297 }
6298 
6299 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
6300   // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
6301   // range, then 0 is placed into the resulting vector. So pretty much any mask
6302   // of 8 elements can work here.
6303   return VT == MVT::v8i8 && M.size() == 8;
6304 }
6305 
6306 static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask,
6307                                unsigned Index) {
6308   if (Mask.size() == Elements * 2)
6309     return Index / Elements;
6310   return Mask[Index] == 0 ? 0 : 1;
6311 }
6312 
6313 // Checks whether the shuffle mask represents a vector transpose (VTRN) by
6314 // checking that pairs of elements in the shuffle mask represent the same index
6315 // in each vector, incrementing the expected index by 2 at each step.
6316 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
6317 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
6318 //  v2={e,f,g,h}
6319 // WhichResult gives the offset for each element in the mask based on which
6320 // of the two results it belongs to.
6321 //
6322 // The transpose can be represented either as:
6323 // result1 = shufflevector v1, v2, result1_shuffle_mask
6324 // result2 = shufflevector v1, v2, result2_shuffle_mask
6325 // where v1/v2 and the shuffle masks have the same number of elements
6326 // (here WhichResult (see below) indicates which result is being checked)
6327 //
6328 // or as:
6329 // results = shufflevector v1, v2, shuffle_mask
6330 // where both results are returned in one vector and the shuffle mask has twice
6331 // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we
6332 // want to check the low half and high half of the shuffle mask as if it were
6333 // the other case
6334 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
6335   unsigned EltSz = VT.getScalarSizeInBits();
6336   if (EltSz == 64)
6337     return false;
6338 
6339   unsigned NumElts = VT.getVectorNumElements();
6340   if (M.size() != NumElts && M.size() != NumElts*2)
6341     return false;
6342 
6343   // If the mask is twice as long as the input vector then we need to check the
6344   // upper and lower parts of the mask with a matching value for WhichResult
6345   // FIXME: A mask with only even values will be rejected in case the first
6346   // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
6347   // M[0] is used to determine WhichResult
6348   for (unsigned i = 0; i < M.size(); i += NumElts) {
6349     WhichResult = SelectPairHalf(NumElts, M, i);
6350     for (unsigned j = 0; j < NumElts; j += 2) {
6351       if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
6352           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult))
6353         return false;
6354     }
6355   }
6356 
6357   if (M.size() == NumElts*2)
6358     WhichResult = 0;
6359 
6360   return true;
6361 }
6362 
6363 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
6364 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
6365 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
6366 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
6367   unsigned EltSz = VT.getScalarSizeInBits();
6368   if (EltSz == 64)
6369     return false;
6370 
6371   unsigned NumElts = VT.getVectorNumElements();
6372   if (M.size() != NumElts && M.size() != NumElts*2)
6373     return false;
6374 
6375   for (unsigned i = 0; i < M.size(); i += NumElts) {
6376     WhichResult = SelectPairHalf(NumElts, M, i);
6377     for (unsigned j = 0; j < NumElts; j += 2) {
6378       if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
6379           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult))
6380         return false;
6381     }
6382   }
6383 
6384   if (M.size() == NumElts*2)
6385     WhichResult = 0;
6386 
6387   return true;
6388 }
6389 
6390 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
6391 // that the mask elements are either all even and in steps of size 2 or all odd
6392 // and in steps of size 2.
6393 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
6394 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
6395 //  v2={e,f,g,h}
6396 // Requires similar checks to that of isVTRNMask with
6397 // respect the how results are returned.
6398 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
6399   unsigned EltSz = VT.getScalarSizeInBits();
6400   if (EltSz == 64)
6401     return false;
6402 
6403   unsigned NumElts = VT.getVectorNumElements();
6404   if (M.size() != NumElts && M.size() != NumElts*2)
6405     return false;
6406 
6407   for (unsigned i = 0; i < M.size(); i += NumElts) {
6408     WhichResult = SelectPairHalf(NumElts, M, i);
6409     for (unsigned j = 0; j < NumElts; ++j) {
6410       if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
6411         return false;
6412     }
6413   }
6414 
6415   if (M.size() == NumElts*2)
6416     WhichResult = 0;
6417 
6418   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
6419   if (VT.is64BitVector() && EltSz == 32)
6420     return false;
6421 
6422   return true;
6423 }
6424 
6425 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
6426 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
6427 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
6428 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
6429   unsigned EltSz = VT.getScalarSizeInBits();
6430   if (EltSz == 64)
6431     return false;
6432 
6433   unsigned NumElts = VT.getVectorNumElements();
6434   if (M.size() != NumElts && M.size() != NumElts*2)
6435     return false;
6436 
6437   unsigned Half = NumElts / 2;
6438   for (unsigned i = 0; i < M.size(); i += NumElts) {
6439     WhichResult = SelectPairHalf(NumElts, M, i);
6440     for (unsigned j = 0; j < NumElts; j += Half) {
6441       unsigned Idx = WhichResult;
6442       for (unsigned k = 0; k < Half; ++k) {
6443         int MIdx = M[i + j + k];
6444         if (MIdx >= 0 && (unsigned) MIdx != Idx)
6445           return false;
6446         Idx += 2;
6447       }
6448     }
6449   }
6450 
6451   if (M.size() == NumElts*2)
6452     WhichResult = 0;
6453 
6454   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
6455   if (VT.is64BitVector() && EltSz == 32)
6456     return false;
6457 
6458   return true;
6459 }
6460 
6461 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking
6462 // that pairs of elements of the shufflemask represent the same index in each
6463 // vector incrementing sequentially through the vectors.
6464 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
6465 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
6466 //  v2={e,f,g,h}
6467 // Requires similar checks to that of isVTRNMask with respect the how results
6468 // are returned.
6469 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
6470   unsigned EltSz = VT.getScalarSizeInBits();
6471   if (EltSz == 64)
6472     return false;
6473 
6474   unsigned NumElts = VT.getVectorNumElements();
6475   if (M.size() != NumElts && M.size() != NumElts*2)
6476     return false;
6477 
6478   for (unsigned i = 0; i < M.size(); i += NumElts) {
6479     WhichResult = SelectPairHalf(NumElts, M, i);
6480     unsigned Idx = WhichResult * NumElts / 2;
6481     for (unsigned j = 0; j < NumElts; j += 2) {
6482       if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
6483           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
6484         return false;
6485       Idx += 1;
6486     }
6487   }
6488 
6489   if (M.size() == NumElts*2)
6490     WhichResult = 0;
6491 
6492   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
6493   if (VT.is64BitVector() && EltSz == 32)
6494     return false;
6495 
6496   return true;
6497 }
6498 
6499 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
6500 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
6501 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
6502 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
6503   unsigned EltSz = VT.getScalarSizeInBits();
6504   if (EltSz == 64)
6505     return false;
6506 
6507   unsigned NumElts = VT.getVectorNumElements();
6508   if (M.size() != NumElts && M.size() != NumElts*2)
6509     return false;
6510 
6511   for (unsigned i = 0; i < M.size(); i += NumElts) {
6512     WhichResult = SelectPairHalf(NumElts, M, i);
6513     unsigned Idx = WhichResult * NumElts / 2;
6514     for (unsigned j = 0; j < NumElts; j += 2) {
6515       if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
6516           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
6517         return false;
6518       Idx += 1;
6519     }
6520   }
6521 
6522   if (M.size() == NumElts*2)
6523     WhichResult = 0;
6524 
6525   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
6526   if (VT.is64BitVector() && EltSz == 32)
6527     return false;
6528 
6529   return true;
6530 }
6531 
6532 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
6533 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
6534 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
6535                                            unsigned &WhichResult,
6536                                            bool &isV_UNDEF) {
6537   isV_UNDEF = false;
6538   if (isVTRNMask(ShuffleMask, VT, WhichResult))
6539     return ARMISD::VTRN;
6540   if (isVUZPMask(ShuffleMask, VT, WhichResult))
6541     return ARMISD::VUZP;
6542   if (isVZIPMask(ShuffleMask, VT, WhichResult))
6543     return ARMISD::VZIP;
6544 
6545   isV_UNDEF = true;
6546   if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
6547     return ARMISD::VTRN;
6548   if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
6549     return ARMISD::VUZP;
6550   if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
6551     return ARMISD::VZIP;
6552 
6553   return 0;
6554 }
6555 
6556 /// \return true if this is a reverse operation on an vector.
6557 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
6558   unsigned NumElts = VT.getVectorNumElements();
6559   // Make sure the mask has the right size.
6560   if (NumElts != M.size())
6561       return false;
6562 
6563   // Look for <15, ..., 3, -1, 1, 0>.
6564   for (unsigned i = 0; i != NumElts; ++i)
6565     if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
6566       return false;
6567 
6568   return true;
6569 }
6570 
6571 // If N is an integer constant that can be moved into a register in one
6572 // instruction, return an SDValue of such a constant (will become a MOV
6573 // instruction).  Otherwise return null.
6574 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
6575                                      const ARMSubtarget *ST, const SDLoc &dl) {
6576   uint64_t Val;
6577   if (!isa<ConstantSDNode>(N))
6578     return SDValue();
6579   Val = cast<ConstantSDNode>(N)->getZExtValue();
6580 
6581   if (ST->isThumb1Only()) {
6582     if (Val <= 255 || ~Val <= 255)
6583       return DAG.getConstant(Val, dl, MVT::i32);
6584   } else {
6585     if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
6586       return DAG.getConstant(Val, dl, MVT::i32);
6587   }
6588   return SDValue();
6589 }
6590 
// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
//
// Strategies are tried cheapest-first:
//   1. constant splats via VMOV/VMVN immediates (or vmov.f32 for f32/f16),
//   2. VDUP/VDUPLANE of a dominant value, patching differing lanes,
//   3. reconstruction as a shuffle of existing vectors,
//   4. splitting a 128-bit vector into two 64-bit halves,
//   5. direct subregister assembly for 32/64-bit elements,
//   6. a plain chain of INSERT_VECTOR_ELTs.
SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                             const ARMSubtarget *ST) const {
  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    // All-undef splat: the whole vector is undef.
    if (SplatUndef.isAllOnesValue())
      return DAG.getUNDEF(VT);

    if ((ST->hasNEON() && SplatBitSize <= 64) ||
        (ST->hasMVEIntegerOps() && SplatBitSize <= 32)) {
      // Check if an immediate VMOV works.
      EVT VmovVT;
      SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VmovVT, VT.is128BitVector(),
                                      VMOVModImm);

      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Try an immediate VMVN (move of the bitwise-inverted immediate).
      uint64_t NegatedImm = (~SplatBits).getZExtValue();
      Val = isNEONModifiedImm(
          NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
          DAG, dl, VmovVT, VT.is128BitVector(),
          ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm);
      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
      if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
        int ImmVal = ARM_AM::getFP32Imm(SplatBits);
        if (ImmVal != -1) {
          SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
          return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
        }
      }
    }
  }

  // Scan through the operands to see if only one value is used.
  //
  // As an optimisation, even if more than one value is used it may be more
  // profitable to splat with one value then change some lanes.
  //
  // Heuristically we decide to do this if the vector has a "dominant" value,
  // defined as splatted to more than half of the lanes.
  unsigned NumElts = VT.getVectorNumElements();
  bool isOnlyLowElement = true;   // only operand 0 is defined
  bool usesOnlyOneValue = true;   // all defined operands are the same SDValue
  bool hasDominantValue = false;  // one value fills > NumElts/2 lanes
  bool isConstant = true;         // all defined operands are constants

  // Map of the number of times a particular SDValue appears in the
  // element list.
  DenseMap<SDValue, unsigned> ValueCounts;
  SDValue Value;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.isUndef())
      continue;
    if (i > 0)
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
      isConstant = false;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    // Is this value dominant? (takes up more than half of the lanes)
    if (++Count > (NumElts / 2)) {
      hasDominantValue = true;
      Value = V;
    }
  }
  if (ValueCounts.size() != 1)
    usesOnlyOneValue = false;
  // No dominant value: fall back to an arbitrary defined value.
  if (!Value.getNode() && !ValueCounts.empty())
    Value = ValueCounts.begin()->first;

  if (ValueCounts.empty())
    return DAG.getUNDEF(VT);

  // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
  // Keep going if we are hitting this case.
  if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);

  unsigned EltSize = VT.getScalarSizeInBits();

  // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
  // i32 and try again.
  if (hasDominantValue && EltSize <= 32) {
    if (!isConstant) {
      SDValue N;

      // If we are VDUPing a value that comes directly from a vector, that will
      // cause an unnecessary move to and from a GPR, where instead we could
      // just use VDUPLANE. We can only do this if the lane being extracted
      // is at a constant index, as the VDUP from lane instructions only have
      // constant-index forms.
      ConstantSDNode *constIndex;
      if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
        // We need to create a new undef vector to use for the VDUPLANE if the
        // size of the vector from which we get the value is different than the
        // size of the vector that we need to create. We will insert the element
        // such that the register coalescer will remove unnecessary copies.
        if (VT != Value->getOperand(0).getValueType()) {
          unsigned index = constIndex->getAPIntValue().getLimitedValue() %
                             VT.getVectorNumElements();
          N =  DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
                        Value, DAG.getConstant(index, dl, MVT::i32)),
                           DAG.getConstant(index, dl, MVT::i32));
        } else
          N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                        Value->getOperand(0), Value->getOperand(1));
      } else
        N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);

      if (!usesOnlyOneValue) {
        // The dominant value was splatted as 'N', but we now have to insert
        // all differing elements.
        for (unsigned I = 0; I < NumElts; ++I) {
          if (Op.getOperand(I) == Value)
            continue;
          SmallVector<SDValue, 3> Ops;
          Ops.push_back(N);
          Ops.push_back(Op.getOperand(I));
          Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
          N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
        }
      }
      return N;
    }
    // Constant fp splat: bitcast the elements to integers and retry, so the
    // integer immediate paths above get a chance.
    if (VT.getVectorElementType().isFloatingPoint()) {
      SmallVector<SDValue, 8> Ops;
      MVT FVT = VT.getVectorElementType().getSimpleVT();
      assert(FVT == MVT::f32 || FVT == MVT::f16);
      MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16;
      for (unsigned i = 0; i < NumElts; ++i)
        Ops.push_back(DAG.getNode(ISD::BITCAST, dl, IVT,
                                  Op.getOperand(i)));
      EVT VecVT = EVT::getVectorVT(*DAG.getContext(), IVT, NumElts);
      SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
      Val = LowerBUILD_VECTOR(Val, DAG, ST);
      if (Val.getNode())
        return DAG.getNode(ISD::BITCAST, dl, VT, Val);
    }
    // Constant splat that fits in a single MOV: VDUP from the GPR.
    if (usesOnlyOneValue) {
      SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
      if (isConstant && Val.getNode())
        return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
    }
  }

  // If all elements are constants and the case above didn't get hit, fall back
  // to the default expansion, which will generate a load from the constant
  // pool.
  if (isConstant)
    return SDValue();

  // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
  if (NumElts >= 4) {
    SDValue shuffle = ReconstructShuffle(Op, DAG);
    if (shuffle != SDValue())
      return shuffle;
  }

  if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) {
    // If we haven't found an efficient lowering, try splitting a 128-bit vector
    // into two 64-bit vectors; we might discover a better way to lower it.
    SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts);
    EVT ExtVT = VT.getVectorElementType();
    EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2);
    SDValue Lower =
        DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2));
    if (Lower.getOpcode() == ISD::BUILD_VECTOR)
      Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
    SDValue Upper = DAG.getBuildVector(
        HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
    if (Upper.getOpcode() == ISD::BUILD_VECTOR)
      Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
    // Only profitable if both halves lowered to something non-default.
    if (Lower && Upper)
      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper);
  }

  // Vectors with 32- or 64-bit elements can be built by directly assigning
  // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
  // will be legalized.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i)
      Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target) and materialization element by element
  // on the stack followed by a load for everything else.
  if (!isConstant && !usesOnlyOneValue) {
    SDValue Vec = DAG.getUNDEF(VT);
    for (unsigned i = 0 ; i < NumElts; ++i) {
      SDValue V = Op.getOperand(i);
      if (V.isUndef())
        continue;
      SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
    }
    return Vec;
  }

  return SDValue();
}
6825 
// Gather data to see if the operation can be modelled as a
// shuffle in combination with VEXTs.
//
// The BUILD_VECTOR must consist entirely of EXTRACT_VECTOR_ELTs (with
// constant indices) drawn from at most two distinct source vectors. The
// sources are normalized to the output width (by CONCAT with undef,
// EXTRACT_SUBVECTOR, or a VEXT), bitcast to a common element type, and
// finally combined with a single VECTOR_SHUFFLE.
SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
                                              SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned NumElts = VT.getVectorNumElements();

  // Bookkeeping for one source vector feeding the shuffle.
  struct ShuffleSourceInfo {
    SDValue Vec;
    // Smallest and largest lane of Vec referenced by the BUILD_VECTOR.
    unsigned MinElt = std::numeric_limits<unsigned>::max();
    unsigned MaxElt = 0;

    // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
    // be compatible with the shuffle we intend to construct. As a result
    // ShuffleVec will be some sliding window into the original Vec.
    SDValue ShuffleVec;

    // Code should guarantee that element i in Vec starts at element "WindowBase
    // + i * WindowScale in ShuffleVec".
    int WindowBase = 0;
    int WindowScale = 1;

    ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}

    // Compare against the original vector, so llvm::find can locate an
    // entry by its source SDValue.
    bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
  };

  // First gather all vectors used as an immediate source for this BUILD_VECTOR
  // node.
  SmallVector<ShuffleSourceInfo, 2> Sources;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.isUndef())
      continue;
    else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
      // A shuffle can only come from building a vector from various
      // elements of other vectors.
      return SDValue();
    } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
      // Furthermore, shuffles require a constant mask, whereas extractelts
      // accept variable indices.
      return SDValue();
    }

    // Add this element source to the list if it's not already there.
    SDValue SourceVec = V.getOperand(0);
    auto Source = llvm::find(Sources, SourceVec);
    if (Source == Sources.end())
      Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));

    // Update the minimum and maximum lane number seen.
    unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
    Source->MinElt = std::min(Source->MinElt, EltNo);
    Source->MaxElt = std::max(Source->MaxElt, EltNo);
  }

  // Currently only do something sane when at most two source vectors
  // are involved.
  if (Sources.size() > 2)
    return SDValue();

  // Find out the smallest element size among result and two sources, and use
  // it as element size to build the shuffle_vector.
  EVT SmallestEltTy = VT.getVectorElementType();
  for (auto &Source : Sources) {
    EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
    if (SrcEltTy.bitsLT(SmallestEltTy))
      SmallestEltTy = SrcEltTy;
  }
  // How many shuffle lanes one output element spans.
  unsigned ResMultiplier =
      VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
  // NOTE: from here on NumElts is the lane count of the shuffle type, not of
  // the original BUILD_VECTOR result type.
  NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
  EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);

  // If the source vector is too wide or too narrow, we may nevertheless be able
  // to construct a compatible shuffle either by concatenating it with UNDEF or
  // extracting a suitable range of elements.
  for (auto &Src : Sources) {
    EVT SrcVT = Src.ShuffleVec.getValueType();

    if (SrcVT.getSizeInBits() == VT.getSizeInBits())
      continue;

    // This stage of the search produces a source with the same element type as
    // the original, but with a total width matching the BUILD_VECTOR output.
    EVT EltVT = SrcVT.getVectorElementType();
    unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
    EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);

    if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
      // Only exactly half-width sources can be padded.
      if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
        return SDValue();
      // We can pad out the smaller vector for free, so if it's part of a
      // shuffle...
      Src.ShuffleVec =
          DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
                      DAG.getUNDEF(Src.ShuffleVec.getValueType()));
      continue;
    }

    // Only exactly double-width sources can be narrowed below.
    if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
      return SDValue();

    if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
      // Span too large for a VEXT to cope
      return SDValue();
    }

    if (Src.MinElt >= NumSrcElts) {
      // The extraction can just take the second half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i32));
      Src.WindowBase = -NumSrcElts;
    } else if (Src.MaxElt < NumSrcElts) {
      // The extraction can just take the first half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i32));
    } else {
      // An actual VEXT is needed
      SDValue VEXTSrc1 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i32));
      SDValue VEXTSrc2 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i32));

      Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
                                   VEXTSrc2,
                                   DAG.getConstant(Src.MinElt, dl, MVT::i32));
      Src.WindowBase = -Src.MinElt;
    }
  }

  // Another possible incompatibility occurs from the vector element types. We
  // can fix this by bitcasting the source vectors to the same type we intend
  // for the shuffle.
  for (auto &Src : Sources) {
    EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
    if (SrcEltTy == SmallestEltTy)
      continue;
    assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
    Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
    Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
    Src.WindowBase *= Src.WindowScale;
  }

  // Final sanity check before we try to actually produce a shuffle.
  LLVM_DEBUG(for (auto Src
                  : Sources)
                 assert(Src.ShuffleVec.getValueType() == ShuffleVT););

  // The stars all align, our next step is to produce the mask for the shuffle.
  SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
  int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
  for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
    SDValue Entry = Op.getOperand(i);
    if (Entry.isUndef())
      continue;

    auto Src = llvm::find(Sources, Entry.getOperand(0));
    int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();

    // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
    // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
    // segment.
    EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
    int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
                               VT.getScalarSizeInBits());
    int LanesDefined = BitsDefined / BitsPerShuffleLane;

    // This source is expected to fill ResMultiplier lanes of the final shuffle,
    // starting at the appropriate offset.
    int *LaneMask = &Mask[i * ResMultiplier];

    // Shuffle-mask indices for the second source are offset by NumElts.
    int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
    ExtractBase += NumElts * (Src - Sources.begin());
    for (int j = 0; j < LanesDefined; ++j)
      LaneMask[j] = ExtractBase + j;
  }

  // Final check before we try to produce nonsense...
  if (!isShuffleMaskLegal(Mask, ShuffleVT))
    return SDValue();

  // We can't handle more than two sources. This should have already
  // been checked before this point.
  assert(Sources.size() <= 2 && "Too many sources!");

  SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
  for (unsigned i = 0; i < Sources.size(); ++i)
    ShuffleOps[i] = Sources[i].ShuffleVec;

  SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
                                         ShuffleOps[1], Mask);
  // Bitcast the shuffle result back to the original BUILD_VECTOR type.
  return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
}
7026 
// Opcodes encoded in bits [29:26] of a PerfectShuffleTable entry. These name
// the single NEON/MVE operation a table entry tells us to emit at each step
// when synthesizing a 4-element shuffle (see GeneratePerfectShuffle).
enum ShuffleOpCodes {
  OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
  OP_VREV,     // Reverse elements within each half of the vector.
  OP_VDUP0,    // Duplicate lane 0 across all lanes.
  OP_VDUP1,    // Duplicate lane 1 across all lanes.
  OP_VDUP2,    // Duplicate lane 2 across all lanes.
  OP_VDUP3,    // Duplicate lane 3 across all lanes.
  OP_VEXT1,    // Extract with start offset 1 from the operand pair.
  OP_VEXT2,    // Extract with start offset 2.
  OP_VEXT3,    // Extract with start offset 3.
  OP_VUZPL, // VUZP, left result
  OP_VUZPR, // VUZP, right result
  OP_VZIPL, // VZIP, left result
  OP_VZIPR, // VZIP, right result
  OP_VTRNL, // VTRN, left result
  OP_VTRNR  // VTRN, right result
};
7044 
7045 static bool isLegalMVEShuffleOp(unsigned PFEntry) {
7046   unsigned OpNum = (PFEntry >> 26) & 0x0F;
7047   switch (OpNum) {
7048   case OP_COPY:
7049   case OP_VREV:
7050   case OP_VDUP0:
7051   case OP_VDUP1:
7052   case OP_VDUP2:
7053   case OP_VDUP3:
7054     return true;
7055   }
7056   return false;
7057 }
7058 
7059 /// isShuffleMaskLegal - Targets can use this to indicate that they only
7060 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
7061 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
7062 /// are assumed to be legal.
7063 bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
7064   if (VT.getVectorNumElements() == 4 &&
7065       (VT.is128BitVector() || VT.is64BitVector())) {
7066     unsigned PFIndexes[4];
7067     for (unsigned i = 0; i != 4; ++i) {
7068       if (M[i] < 0)
7069         PFIndexes[i] = 8;
7070       else
7071         PFIndexes[i] = M[i];
7072     }
7073 
7074     // Compute the index in the perfect shuffle table.
7075     unsigned PFTableIndex =
7076       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
7077     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
7078     unsigned Cost = (PFEntry >> 30);
7079 
7080     if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry)))
7081       return true;
7082   }
7083 
7084   bool ReverseVEXT, isV_UNDEF;
7085   unsigned Imm, WhichResult;
7086 
7087   unsigned EltSize = VT.getScalarSizeInBits();
7088   if (EltSize >= 32 ||
7089       ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
7090       isVREVMask(M, VT, 64) ||
7091       isVREVMask(M, VT, 32) ||
7092       isVREVMask(M, VT, 16))
7093     return true;
7094   else if (Subtarget->hasNEON() &&
7095            (isVEXTMask(M, VT, ReverseVEXT, Imm) ||
7096             isVTBLMask(M, VT) ||
7097             isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF)))
7098     return true;
7099   else if (Subtarget->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) &&
7100            isReverseMask(M, VT))
7101     return true;
7102   else
7103     return false;
7104 }
7105 
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle. Operates recursively: each
/// entry names one operation plus two sub-entries describing its operands.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  // Decode the table entry: bits [29:26] hold the opcode, bits [25:13] and
  // [12:0] hold the table indices for the two operand sub-shuffles.
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  if (OpNum == OP_COPY) {
    // A copy of one input: LHSID encoding mask <0,1,2,3> (base-9 digits)
    // means return LHS unchanged; <4,5,6,7> means return RHS.
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  // Recursively materialize the two operands described by the sub-entries.
  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
  EVT VT = OpLHS.getValueType();

  switch (OpNum) {
  default: llvm_unreachable("Unknown shuffle opcode!");
  case OP_VREV:
    // VREV divides the vector in half and swaps within the half.
    // Element width selects which VREV variant applies.
    if (VT.getVectorElementType() == MVT::i32 ||
        VT.getVectorElementType() == MVT::f32)
      return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
    // vrev <4 x i16> -> VREV32
    if (VT.getVectorElementType() == MVT::i16)
      return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
    // vrev <4 x i8> -> VREV16
    assert(VT.getVectorElementType() == MVT::i8);
    return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    // Duplicate lane (OpNum - OP_VDUP0) across the vector.
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
  case OP_VEXT1:
  case OP_VEXT2:
  case OP_VEXT3:
    // Extract starting at offset (OpNum - OP_VEXT1 + 1) from the pair.
    return DAG.getNode(ARMISD::VEXT, dl, VT,
                       OpLHS, OpRHS,
                       DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
  case OP_VUZPL:
  case OP_VUZPR:
    // The two-result ops produce both outputs; pick left (0) or right (1).
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
  case OP_VZIPL:
  case OP_VZIPR:
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
  case OP_VTRNL:
  case OP_VTRNR:
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
  }
}
7165 
7166 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
7167                                        ArrayRef<int> ShuffleMask,
7168                                        SelectionDAG &DAG) {
7169   // Check to see if we can use the VTBL instruction.
7170   SDValue V1 = Op.getOperand(0);
7171   SDValue V2 = Op.getOperand(1);
7172   SDLoc DL(Op);
7173 
7174   SmallVector<SDValue, 8> VTBLMask;
7175   for (ArrayRef<int>::iterator
7176          I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
7177     VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));
7178 
7179   if (V2.getNode()->isUndef())
7180     return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
7181                        DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
7182 
7183   return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
7184                      DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
7185 }
7186 
7187 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
7188                                                       SelectionDAG &DAG) {
7189   SDLoc DL(Op);
7190   SDValue OpLHS = Op.getOperand(0);
7191   EVT VT = OpLHS.getValueType();
7192 
7193   assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
7194          "Expect an v8i16/v16i8 type");
7195   OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
7196   // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now,
7197   // extract the first 8 bytes into the top double word and the last 8 bytes
7198   // into the bottom double word. The v8i16 case is similar.
7199   unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
7200   return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
7201                      DAG.getConstant(ExtractNum, DL, MVT::i32));
7202 }
7203 
/// Lower a VECTOR_SHUFFLE to target-specific NEON/MVE nodes where a direct
/// instruction pattern exists (VDUP, VEXT, VREV, VUZP/VZIP/VTRN, VTBL), to a
/// perfect-shuffle-table expansion for 4-element vectors, or to an explicit
/// element-wise BUILD_VECTOR as a last resort. Returns an empty SDValue if
/// no lowering applies.
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection.  This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same time so that they get CSEd properly.
  ArrayRef<int> ShuffleMask = SVN->getMask();

  unsigned EltSize = VT.getScalarSizeInBits();
  if (EltSize <= 32) {
    if (SVN->isSplat()) {
      int Lane = SVN->getSplatIndex();
      // If this is undef splat, generate it via "just" vdup, if possible.
      if (Lane == -1) Lane = 0;

      // Test if V1 is a SCALAR_TO_VECTOR.
      if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
      // (and probably will turn into a SCALAR_TO_VECTOR once legalization
      // reaches it).
      if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
          !isa<ConstantSDNode>(V1.getOperand(0))) {
        bool IsScalarToVector = true;
        // Equivalent iff every operand past the first is undef.
        for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
          if (!V1.getOperand(i).isUndef()) {
            IsScalarToVector = false;
            break;
          }
        if (IsScalarToVector)
          return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      // General splat: duplicate the selected lane of V1.
      return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
                         DAG.getConstant(Lane, dl, MVT::i32));
    }

    // Two-operand extract (VEXT); ReverseVEXT indicates the operands must be
    // swapped to match the instruction's operand order.
    bool ReverseVEXT = false;
    unsigned Imm = 0;
    if (ST->hasNEON() && isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
      if (ReverseVEXT)
        std::swap(V1, V2);
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
                         DAG.getConstant(Imm, dl, MVT::i32));
    }

    if (isVREVMask(ShuffleMask, VT, 64))
      return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 32))
      return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 16))
      return DAG.getNode(ARMISD::VREV16, dl, VT, V1);

    // Single-operand rotation expressed as VEXT of V1 with itself.
    if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
                         DAG.getConstant(Imm, dl, MVT::i32));
    }

    // Check for Neon shuffles that modify both input vectors in place.
    // If both results are used, i.e., if there are two shuffles with the same
    // source operands and with masks corresponding to both results of one of
    // these operations, DAG memoization will ensure that a single node is
    // used for both shuffles.
    unsigned WhichResult = 0;
    bool isV_UNDEF = false;
    if (ST->hasNEON()) {
      if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
              ShuffleMask, VT, WhichResult, isV_UNDEF)) {
        if (isV_UNDEF)
          V2 = V1;
        return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
            .getValue(WhichResult);
      }
    }

    // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
    // shuffles that produce a result larger than their operands with:
    //   shuffle(concat(v1, undef), concat(v2, undef))
    // ->
    //   shuffle(concat(v1, v2), undef)
    // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
    //
    // This is useful in the general case, but there are special cases where
    // native shuffles produce larger results: the two-result ops.
    //
    // Look through the concat when lowering them:
    //   shuffle(concat(v1, v2), undef)
    // ->
    //   concat(VZIP(v1, v2):0, :1)
    //
    if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
      SDValue SubV1 = V1->getOperand(0);
      SDValue SubV2 = V1->getOperand(1);
      EVT SubVT = SubV1.getValueType();

      // We expect these to have been canonicalized to -1.
      assert(llvm::all_of(ShuffleMask, [&](int i) {
        return i < (int)VT.getVectorNumElements();
      }) && "Unexpected shuffle index into UNDEF operand!");

      if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
              ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
        if (isV_UNDEF)
          SubV2 = SubV1;
        assert((WhichResult == 0) &&
               "In-place shuffle of concat can only have one result!");
        SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
                                  SubV1, SubV2);
        // Both results of the two-result op together form the wide result.
        return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
                           Res.getValue(1));
      }
    }
  }

  // If the shuffle is not directly supported and it has 4 elements, use
  // the PerfectShuffle-generated table to synthesize it from other shuffles.
  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 4) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (ShuffleMask[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = ShuffleMask[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4) {
      if (ST->hasNEON())
        return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
      else if (isLegalMVEShuffleOp(PFEntry)) {
        // MVE: only expand if every step of the recipe (this entry and both
        // of its operand sub-entries) is an MVE-legal operation.
        unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
        unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
        unsigned PFEntryLHS = PerfectShuffleTable[LHSID];
        unsigned PFEntryRHS = PerfectShuffleTable[RHSID];
        if (isLegalMVEShuffleOp(PFEntryLHS) && isLegalMVEShuffleOp(PFEntryRHS))
          return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
      }
    }
  }

  // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i) {
      if (ShuffleMask[i] < 0)
        Ops.push_back(DAG.getUNDEF(EltVT));
      else
        // Mask indices < NumElts select from V1, the rest from V2.
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  ShuffleMask[i] < (int)NumElts ? V1 : V2,
                                  DAG.getConstant(ShuffleMask[i] & (NumElts-1),
                                                  dl, MVT::i32)));
    }
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  if (ST->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
    return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);

  if (ST->hasNEON() && VT == MVT::v8i8)
    if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
      return NewOp;

  return SDValue();
}
7389 
/// Lower INSERT_VECTOR_ELT. Only constant lane indices are supported; for f16
/// elements that would be promoted to f32, the insertion is rewritten over
/// equivalent integer types so the element is not promoted.
SDValue ARMTargetLowering::
LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
  // INSERT_VECTOR_ELT is legal only for immediate indexes.
  SDValue Lane = Op.getOperand(2);
  if (!isa<ConstantSDNode>(Lane))
    return SDValue();

  SDValue Elt = Op.getOperand(1);
  EVT EltVT = Elt.getValueType();
  if (getTypeAction(*DAG.getContext(), EltVT) ==
      TargetLowering::TypePromoteFloat) {
    // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32,
    // but the type system will try to do that if we don't intervene.
    // Reinterpret any such vector-element insertion as one with the
    // corresponding integer types.

    SDLoc dl(Op);

    // The same-width integer type is guaranteed not to be float-promoted.
    EVT IEltVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits());
    assert(getTypeAction(*DAG.getContext(), IEltVT) !=
           TargetLowering::TypePromoteFloat);

    SDValue VecIn = Op.getOperand(0);
    EVT VecVT = VecIn.getValueType();
    EVT IVecVT = EVT::getVectorVT(*DAG.getContext(), IEltVT,
                                  VecVT.getVectorNumElements());

    // Bitcast element and vector to integer, insert, then cast back.
    SDValue IElt = DAG.getNode(ISD::BITCAST, dl, IEltVT, Elt);
    SDValue IVecIn = DAG.getNode(ISD::BITCAST, dl, IVecVT, VecIn);
    SDValue IVecOut = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVecVT,
                                  IVecIn, IElt, Lane);
    return DAG.getNode(ISD::BITCAST, dl, VecVT, IVecOut);
  }

  // All other cases are already legal as-is.
  return Op;
}
7426 
7427 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
7428   // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
7429   SDValue Lane = Op.getOperand(1);
7430   if (!isa<ConstantSDNode>(Lane))
7431     return SDValue();
7432 
7433   SDValue Vec = Op.getOperand(0);
7434   if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
7435     SDLoc dl(Op);
7436     return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
7437   }
7438 
7439   return Op;
7440 }
7441 
7442 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7443   // The only time a CONCAT_VECTORS operation can have legal types is when
7444   // two 64-bit vectors are concatenated to a 128-bit vector.
7445   assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
7446          "unexpected CONCAT_VECTORS");
7447   SDLoc dl(Op);
7448   SDValue Val = DAG.getUNDEF(MVT::v2f64);
7449   SDValue Op0 = Op.getOperand(0);
7450   SDValue Op1 = Op.getOperand(1);
7451   if (!Op0.isUndef())
7452     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
7453                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
7454                       DAG.getIntPtrConstant(0, dl));
7455   if (!Op1.isUndef())
7456     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
7457                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
7458                       DAG.getIntPtrConstant(1, dl));
7459   return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
7460 }
7461 
/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
/// element has been zero/sign-extended, depending on the isSigned parameter,
/// from an integer type half its size.
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
                                   bool isSigned) {
  // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
  EVT VT = N->getValueType(0);
  if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    if (BVN->getValueType(0) != MVT::v4i32 ||
        BVN->getOpcode() != ISD::BUILD_VECTOR)
      return false;
    // Each i64 element is split across two i32 elements; which i32 holds the
    // low half depends on endianness.
    unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
    unsigned HiElt = 1 - LoElt;
    ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
    ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
    ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
    ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
    // Non-constant elements disqualify the vector.
    if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
      return false;
    if (isSigned) {
      // Sign-extended from i32 iff each high half is the sign-extension
      // (arithmetic shift by 32) of its low half.
      if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
          Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
        return true;
    } else {
      // Zero-extended from i32 iff both high halves are zero.
      if (Hi0->isNullValue() && Hi1->isNullValue())
        return true;
    }
    return false;
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  // For other vector types: every element must be a constant that fits in
  // half the element width (as a signed or unsigned value respectively).
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDNode *Elt = N->getOperand(i).getNode();
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
      unsigned EltSize = VT.getScalarSizeInBits();
      unsigned HalfSize = EltSize / 2;
      if (isSigned) {
        if (!isIntN(HalfSize, C->getSExtValue()))
          return false;
      } else {
        if (!isUIntN(HalfSize, C->getZExtValue()))
          return false;
      }
      continue;
    }
    return false;
  }

  return true;
}
7515 
7516 /// isSignExtended - Check if a node is a vector value that is sign-extended
7517 /// or a constant BUILD_VECTOR with sign-extended elements.
7518 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
7519   if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
7520     return true;
7521   if (isExtendedBUILD_VECTOR(N, DAG, true))
7522     return true;
7523   return false;
7524 }
7525 
7526 /// isZeroExtended - Check if a node is a vector value that is zero-extended
7527 /// or a constant BUILD_VECTOR with zero-extended elements.
7528 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
7529   if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
7530     return true;
7531   if (isExtendedBUILD_VECTOR(N, DAG, false))
7532     return true;
7533   return false;
7534 }
7535 
7536 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
7537   if (OrigVT.getSizeInBits() >= 64)
7538     return OrigVT;
7539 
7540   assert(OrigVT.isSimple() && "Expecting a simple value type");
7541 
7542   MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
7543   switch (OrigSimpleTy) {
7544   default: llvm_unreachable("Unexpected Vector Type");
7545   case MVT::v2i8:
7546   case MVT::v2i16:
7547      return MVT::v2i32;
7548   case MVT::v4i8:
7549     return  MVT::v4i16;
7550   }
7551 }
7552 
7553 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
7554 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
7555 /// We insert the required extension here to get the vector to fill a D register.
7556 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
7557                                             const EVT &OrigTy,
7558                                             const EVT &ExtTy,
7559                                             unsigned ExtOpcode) {
7560   // The vector originally had a size of OrigTy. It was then extended to ExtTy.
7561   // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
7562   // 64-bits we need to insert a new extension so that it will be 64-bits.
7563   assert(ExtTy.is128BitVector() && "Unexpected extension size");
7564   if (OrigTy.getSizeInBits() >= 64)
7565     return N;
7566 
7567   // Must extend size to at least 64 bits to be used as an operand for VMULL.
7568   EVT NewVT = getExtensionTo64Bits(OrigTy);
7569 
7570   return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
7571 }
7572 
/// SkipLoadExtensionForVMULL - return a load of the original vector size that
/// does not do any sign/zero extension. If the original vector is less
/// than 64 bits, an appropriate extension will be added after the load to
/// reach a total size of 64 bits. We have to add the extension separately
/// because ARM does not have a sign/zero extending load for vectors.
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
  EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());

  // The load already has the right type.
  // Rebuild it as a plain (non-extending) load of the in-memory type,
  // preserving the chain, pointer, alignment and memory-operand flags.
  if (ExtendedTy == LD->getMemoryVT())
    return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
                       LD->getBasePtr(), LD->getPointerInfo(),
                       LD->getAlignment(), LD->getMemOperand()->getFlags());

  // We need to create a zextload/sextload. We cannot just create a load
  // followed by a zext/zext node because LowerMUL is also run during normal
  // operation legalization where we can't create illegal types.
  return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
                        LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
                        LD->getMemoryVT(), LD->getAlignment(),
                        LD->getMemOperand()->getFlags());
}
7595 
/// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
/// extending load, or BUILD_VECTOR with extended elements, return the
/// unextended value. The unextended vector should be 64 bits so that it can
/// be used as an operand to a VMULL instruction. If the original vector size
/// before extension is less than 64 bits we add a an extension to resize
/// the vector to 64 bits.
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
  // Explicit extension node: strip it, re-extending to 64 bits if needed.
  if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
    return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
                                        N->getOperand(0)->getValueType(0),
                                        N->getValueType(0),
                                        N->getOpcode());

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
           "Expected extending load");

    // Replace the extending load with a plain load plus an explicit extend,
    // rewiring both the chain (value 1) and the value (value 0) uses so the
    // old load node is fully superseded.
    SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG);
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1));
    unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue extLoad =
        DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad);
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad);

    return newLoad;
  }

  // Otherwise, the value must be a BUILD_VECTOR.  For v2i64, it will
  // have been legalized as a BITCAST from v4i32.
  if (N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
           BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
    // Collect the two low (endian-dependent) i32 halves into a v2i32.
    unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
    return DAG.getBuildVector(
        MVT::v2i32, SDLoc(N),
        {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
  }
  // Construct a new BUILD_VECTOR with elements truncated to half the size.
  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
  EVT VT = N->getValueType(0);
  unsigned EltSize = VT.getScalarSizeInBits() / 2;
  unsigned NumElts = VT.getVectorNumElements();
  MVT TruncVT = MVT::getIntegerVT(EltSize);
  SmallVector<SDValue, 8> Ops;
  SDLoc dl(N);
  for (unsigned i = 0; i != NumElts; ++i) {
    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
    const APInt &CInt = C->getAPIntValue();
    // Element types smaller than 32 bits are not legal, so use i32 elements.
    // The values are implicitly truncated so sext vs. zext doesn't matter.
    Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
  }
  return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
}
7651 
7652 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
7653   unsigned Opcode = N->getOpcode();
7654   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
7655     SDNode *N0 = N->getOperand(0).getNode();
7656     SDNode *N1 = N->getOperand(1).getNode();
7657     return N0->hasOneUse() && N1->hasOneUse() &&
7658       isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
7659   }
7660   return false;
7661 }
7662 
7663 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
7664   unsigned Opcode = N->getOpcode();
7665   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
7666     SDNode *N0 = N->getOperand(0).getNode();
7667     SDNode *N1 = N->getOperand(1).getNode();
7668     return N0->hasOneUse() && N1->hasOneUse() &&
7669       isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
7670   }
7671   return false;
7672 }
7673 
/// Lower an ISD::MUL of a 128-bit integer vector. When both operands are
/// sign- or zero-extended from half-width values, the multiply is selected
/// as VMULLs/VMULLu on the unextended 64-bit operands; a multiply by an
/// extended add/sub is distributed into two VMULLs (see comment below).
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
  // Multiplications are only custom-lowered for 128-bit vectors so that
  // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
  EVT VT = Op.getValueType();
  assert(VT.is128BitVector() && VT.isInteger() &&
         "unexpected type for custom-lowering ISD::MUL");
  SDNode *N0 = Op.getOperand(0).getNode();
  SDNode *N1 = Op.getOperand(1).getNode();
  unsigned NewOpc = 0;
  bool isMLA = false;
  bool isN0SExt = isSignExtended(N0, DAG);
  bool isN1SExt = isSignExtended(N1, DAG);
  // Both operands sign-extended -> signed widening multiply.
  if (isN0SExt && isN1SExt)
    NewOpc = ARMISD::VMULLs;
  else {
    bool isN0ZExt = isZeroExtended(N0, DAG);
    bool isN1ZExt = isZeroExtended(N1, DAG);
    // Both operands zero-extended -> unsigned widening multiply.
    if (isN0ZExt && isN1ZExt)
      NewOpc = ARMISD::VMULLu;
    else if (isN1SExt || isN1ZExt) {
      // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
      // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
      if (isN1SExt && isAddSubSExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLs;
        isMLA = true;
      } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
        // Same pattern with the add/sub on the other side; swap so the
        // add/sub always ends up in N0.
        std::swap(N0, N1);
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      }
    }

    if (!NewOpc) {
      if (VT == MVT::v2i64)
        // Fall through to expand this.  It is not legal.
        return SDValue();
      else
        // Other vector multiplications are legal.
        return Op;
    }
  }

  // Legalize to a VMULL instruction.
  SDLoc DL(Op);
  SDValue Op0;
  // Strip the extension from each operand to get the 64-bit VMULL inputs.
  SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
  if (!isMLA) {
    Op0 = SkipExtensionForVMULL(N0, DAG);
    assert(Op0.getValueType().is64BitVector() &&
           Op1.getValueType().is64BitVector() &&
           "unexpected types for extended operands to VMULL");
    return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
  }

  // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during
  // isel lowering to take advantage of no-stall back to back vmul + vmla.
  //   vmull q0, d4, d6
  //   vmlal q0, d5, d6
  // is faster than
  //   vaddl q0, d4, d5
  //   vmovl q1, d6
  //   vmul  q0, q0, q1
  SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
  SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
  EVT Op1VT = Op1.getValueType();
  // Rebuild N0's add/sub over the two widening multiplies.
  return DAG.getNode(N0->getOpcode(), DL, VT,
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
}
7748 
7749 static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
7750                               SelectionDAG &DAG) {
7751   // TODO: Should this propagate fast-math-flags?
7752 
7753   // Convert to float
7754   // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
7755   // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
7756   X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
7757   Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
7758   X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
7759   Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
7760   // Get reciprocal estimate.
7761   // float4 recip = vrecpeq_f32(yf);
7762   Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7763                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7764                    Y);
7765   // Because char has a smaller range than uchar, we can actually get away
7766   // without any newton steps.  This requires that we use a weird bias
7767   // of 0xb000, however (again, this has been exhaustively tested).
7768   // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
7769   X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
7770   X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
7771   Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
7772   X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
7773   X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
7774   // Convert back to short.
7775   X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
7776   X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
7777   return X;
7778 }
7779 
7780 static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
7781                                SelectionDAG &DAG) {
7782   // TODO: Should this propagate fast-math-flags?
7783 
7784   SDValue N2;
7785   // Convert to float.
7786   // float4 yf = vcvt_f32_s32(vmovl_s16(y));
7787   // float4 xf = vcvt_f32_s32(vmovl_s16(x));
7788   N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
7789   N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
7790   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
7791   N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
7792 
7793   // Use reciprocal estimate and one refinement step.
7794   // float4 recip = vrecpeq_f32(yf);
7795   // recip *= vrecpsq_f32(yf, recip);
7796   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7797                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7798                    N1);
7799   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7800                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7801                    N1, N2);
7802   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7803   // Because short has a smaller range than ushort, we can actually get away
7804   // with only a single newton step.  This requires that we use a weird bias
7805   // of 89, however (again, this has been exhaustively tested).
7806   // float4 result = as_float4(as_int4(xf*recip) + 0x89);
7807   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
7808   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
7809   N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
7810   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
7811   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
7812   // Convert back to integer and return.
7813   // return vmovn_s32(vcvt_s32_f32(result));
7814   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
7815   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
7816   return N0;
7817 }
7818 
7819 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
7820   EVT VT = Op.getValueType();
7821   assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
7822          "unexpected type for custom-lowering ISD::SDIV");
7823 
7824   SDLoc dl(Op);
7825   SDValue N0 = Op.getOperand(0);
7826   SDValue N1 = Op.getOperand(1);
7827   SDValue N2, N3;
7828 
7829   if (VT == MVT::v8i8) {
7830     N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
7831     N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
7832 
7833     N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7834                      DAG.getIntPtrConstant(4, dl));
7835     N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7836                      DAG.getIntPtrConstant(4, dl));
7837     N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7838                      DAG.getIntPtrConstant(0, dl));
7839     N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7840                      DAG.getIntPtrConstant(0, dl));
7841 
7842     N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
7843     N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
7844 
7845     N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
7846     N0 = LowerCONCAT_VECTORS(N0, DAG);
7847 
7848     N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
7849     return N0;
7850   }
7851   return LowerSDIV_v4i16(N0, N1, dl, DAG);
7852 }
7853 
/// Custom-lower ISD::UDIV for the vector types v4i16 and v8i8.  There is no
/// NEON vector integer divide, so the division is performed in floating point
/// using a reciprocal estimate (VRECPE) refined by Newton-Raphson steps
/// (VRECPS).  v8i8 is widened, split into v4i16 halves, divided, and narrowed
/// back; v4i16 is divided directly below.
static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
  // TODO: Should this propagate fast-math-flags?
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::UDIV");

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
    // Widen to v8i16.  After zero-extension every element lies in [0, 255],
    // i.e. is non-negative as an i16, so the *signed* v4i16 division used on
    // the halves below computes the unsigned quotient exactly.
    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);

    // High halves of the dividend (N2) and divisor (N3).
    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4, dl));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4, dl));
    // Low halves overwrite N0/N1.
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0, dl));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0, dl));

    N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16

    // Reassemble the quotient halves, then narrow to v8i8 with a saturating
    // signed-to-unsigned narrow (vqmovnsu).
    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG);

    N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
                     DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
                                     MVT::i32),
                     N0);
    return N0;
  }

  // v4i16 udiv ... Convert to float.
  // float4 yf = vcvt_f32_s32(vmovl_u16(y));
  // float4 xf = vcvt_f32_s32(vmovl_u16(x));
  // SINT_TO_FP is safe here because the zero-extended i32 values are
  // non-negative.
  N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and two refinement steps.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  // recip *= vrecpsq_f32(yf, recip);
  // BN1 keeps the original float divisor live across both refinement steps.
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                   BN1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Simply multiplying by the reciprocal estimate can leave us a few ulps
  // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
  // and that it will never cause us to return an answer too large).
  // float4 result = as_float4(as_int4(xf*recip) + 2);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(2, dl, MVT::v4i32);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_u32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}
7929 
7930 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
7931   SDNode *N = Op.getNode();
7932   EVT VT = N->getValueType(0);
7933   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
7934 
7935   SDValue Carry = Op.getOperand(2);
7936 
7937   SDLoc DL(Op);
7938 
7939   SDValue Result;
7940   if (Op.getOpcode() == ISD::ADDCARRY) {
7941     // This converts the boolean value carry into the carry flag.
7942     Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
7943 
7944     // Do the addition proper using the carry flag we wanted.
7945     Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0),
7946                          Op.getOperand(1), Carry);
7947 
7948     // Now convert the carry flag into a boolean value.
7949     Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
7950   } else {
7951     // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we
7952     // have to invert the carry first.
7953     Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
7954                         DAG.getConstant(1, DL, MVT::i32), Carry);
7955     // This converts the boolean value carry into the carry flag.
7956     Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
7957 
7958     // Do the subtraction proper using the carry flag we wanted.
7959     Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0),
7960                          Op.getOperand(1), Carry);
7961 
7962     // Now convert the carry flag into a boolean value.
7963     Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
7964     // But the carry returned by ARMISD::SUBE is not a borrow as expected
7965     // by ISD::SUBCARRY, so compute 1 - C.
7966     Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
7967                         DAG.getConstant(1, DL, MVT::i32), Carry);
7968   }
7969 
7970   // Return both values.
7971   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry);
7972 }
7973 
/// Lower ISD::FSINCOS by calling the Darwin __sincos_stret entry point, which
/// computes sin and cos in a single call.  Depending on the ABI the {sin, cos}
/// pair comes back either directly in registers or indirectly through an sret
/// stack slot, from which the two values are then re-loaded.
SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin());

  // For iOS, we want to call an alternative entry point: __sincos_stret,
  // return values are passed via sret.
  SDLoc dl(Op);
  SDValue Arg = Op.getOperand(0);
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Pair of floats / doubles used to pass the result.
  Type *RetTy = StructType::get(ArgTy, ArgTy);
  auto &DL = DAG.getDataLayout();

  ArgListTy Args;
  // Under APCS the result struct is returned indirectly via an sret pointer.
  bool ShouldUseSRet = Subtarget->isAPCS_ABI();
  SDValue SRet;
  if (ShouldUseSRet) {
    // Create stack object for sret.
    const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
    const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy);
    int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
    SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));

    // The sret pointer is passed as an explicit first argument; the call
    // itself then returns void.
    ArgListEntry Entry;
    Entry.Node = SRet;
    Entry.Ty = RetTy->getPointerTo();
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Entry.IsSRet = true;
    Args.push_back(Entry);
    RetTy = Type::getVoidTy(*DAG.getContext());
  }

  // The actual f32/f64 argument.
  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty = ArgTy;
  Entry.IsSExt = false;
  Entry.IsZExt = false;
  Args.push_back(Entry);

  RTLIB::Libcall LC =
      (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
  const char *LibcallName = getLibcallName(LC);
  CallingConv::ID CC = getLibcallCallingConv(LC);
  SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(DAG.getEntryNode())
      .setCallee(CC, RetTy, Callee, std::move(Args))
      .setDiscardResult(ShouldUseSRet);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  // If the results came back in registers, we are done.
  if (!ShouldUseSRet)
    return CallResult.first;

  // Otherwise load the {sin, cos} pair back out of the sret slot, chained
  // on the call so the loads happen after the call writes the slot.
  SDValue LoadSin =
      DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());

  // Address of cos field.
  SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
                            DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
  SDValue LoadCos =
      DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());

  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
                     LoadSin.getValue(0), LoadCos.getValue(0));
}
8048 
8049 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
8050                                                   bool Signed,
8051                                                   SDValue &Chain) const {
8052   EVT VT = Op.getValueType();
8053   assert((VT == MVT::i32 || VT == MVT::i64) &&
8054          "unexpected type for custom lowering DIV");
8055   SDLoc dl(Op);
8056 
8057   const auto &DL = DAG.getDataLayout();
8058   const auto &TLI = DAG.getTargetLoweringInfo();
8059 
8060   const char *Name = nullptr;
8061   if (Signed)
8062     Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
8063   else
8064     Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";
8065 
8066   SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));
8067 
8068   ARMTargetLowering::ArgListTy Args;
8069 
8070   for (auto AI : {1, 0}) {
8071     ArgListEntry Arg;
8072     Arg.Node = Op.getOperand(AI);
8073     Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
8074     Args.push_back(Arg);
8075   }
8076 
8077   CallLoweringInfo CLI(DAG);
8078   CLI.setDebugLoc(dl)
8079     .setChain(Chain)
8080     .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
8081                ES, std::move(Args));
8082 
8083   return LowerCallTo(CLI).first;
8084 }
8085 
8086 // This is a code size optimisation: return the original SDIV node to
8087 // DAGCombiner when we don't want to expand SDIV into a sequence of
8088 // instructions, and an empty node otherwise which will cause the
8089 // SDIV to be expanded in DAGCombine.
8090 SDValue
8091 ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
8092                                  SelectionDAG &DAG,
8093                                  SmallVectorImpl<SDNode *> &Created) const {
8094   // TODO: Support SREM
8095   if (N->getOpcode() != ISD::SDIV)
8096     return SDValue();
8097 
8098   const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget());
8099   const bool MinSize = ST.hasMinSize();
8100   const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode()
8101                                       : ST.hasDivideInARMMode();
8102 
8103   // Don't touch vector types; rewriting this may lead to scalarizing
8104   // the int divs.
8105   if (N->getOperand(0).getValueType().isVector())
8106     return SDValue();
8107 
8108   // Bail if MinSize is not set, and also for both ARM and Thumb mode we need
8109   // hwdiv support for this to be really profitable.
8110   if (!(MinSize && HasDivide))
8111     return SDValue();
8112 
8113   // ARM mode is a bit simpler than Thumb: we can handle large power
8114   // of 2 immediates with 1 mov instruction; no further checks required,
8115   // just return the sdiv node.
8116   if (!ST.isThumb())
8117     return SDValue(N, 0);
8118 
8119   // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV,
8120   // and thus lose the code size benefits of a MOVS that requires only 2.
8121   // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here,
8122   // but as it's doing exactly this, it's not worth the trouble to get TTI.
8123   if (Divisor.sgt(128))
8124     return SDValue();
8125 
8126   return SDValue(N, 0);
8127 }
8128 
8129 SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
8130                                             bool Signed) const {
8131   assert(Op.getValueType() == MVT::i32 &&
8132          "unexpected type for custom lowering DIV");
8133   SDLoc dl(Op);
8134 
8135   SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
8136                                DAG.getEntryNode(), Op.getOperand(1));
8137 
8138   return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
8139 }
8140 
8141 static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) {
8142   SDLoc DL(N);
8143   SDValue Op = N->getOperand(1);
8144   if (N->getValueType(0) == MVT::i32)
8145     return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
8146   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
8147                            DAG.getConstant(0, DL, MVT::i32));
8148   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
8149                            DAG.getConstant(1, DL, MVT::i32));
8150   return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
8151                      DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
8152 }
8153 
8154 void ARMTargetLowering::ExpandDIV_Windows(
8155     SDValue Op, SelectionDAG &DAG, bool Signed,
8156     SmallVectorImpl<SDValue> &Results) const {
8157   const auto &DL = DAG.getDataLayout();
8158   const auto &TLI = DAG.getTargetLoweringInfo();
8159 
8160   assert(Op.getValueType() == MVT::i64 &&
8161          "unexpected type for custom lowering DIV");
8162   SDLoc dl(Op);
8163 
8164   SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());
8165 
8166   SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
8167 
8168   SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
8169   SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
8170                               DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
8171   Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);
8172 
8173   Results.push_back(Lower);
8174   Results.push_back(Upper);
8175 }
8176 
8177 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
8178   if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
8179     // Acquire/Release load/store is not legal for targets without a dmb or
8180     // equivalent available.
8181     return SDValue();
8182 
8183   // Monotonic load/store is legal for all targets.
8184   return Op;
8185 }
8186 
8187 static void ReplaceREADCYCLECOUNTER(SDNode *N,
8188                                     SmallVectorImpl<SDValue> &Results,
8189                                     SelectionDAG &DAG,
8190                                     const ARMSubtarget *Subtarget) {
8191   SDLoc DL(N);
8192   // Under Power Management extensions, the cycle-count is:
8193   //    mrc p15, #0, <Rt>, c9, c13, #0
8194   SDValue Ops[] = { N->getOperand(0), // Chain
8195                     DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
8196                     DAG.getConstant(15, DL, MVT::i32),
8197                     DAG.getConstant(0, DL, MVT::i32),
8198                     DAG.getConstant(9, DL, MVT::i32),
8199                     DAG.getConstant(13, DL, MVT::i32),
8200                     DAG.getConstant(0, DL, MVT::i32)
8201   };
8202 
8203   SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
8204                                  DAG.getVTList(MVT::i32, MVT::Other), Ops);
8205   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
8206                                 DAG.getConstant(0, DL, MVT::i32)));
8207   Results.push_back(Cycles32.getValue(1));
8208 }
8209 
8210 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
8211   SDLoc dl(V.getNode());
8212   SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
8213   SDValue VHi = DAG.getAnyExtOrTrunc(
8214       DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
8215       dl, MVT::i32);
8216   bool isBigEndian = DAG.getDataLayout().isBigEndian();
8217   if (isBigEndian)
8218     std::swap (VLo, VHi);
8219   SDValue RegClass =
8220       DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
8221   SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
8222   SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
8223   const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
8224   return SDValue(
8225       DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
8226 }
8227 
/// Expand a 64-bit ISD::ATOMIC_CMP_SWAP into the ARM::CMP_SWAP_64 pseudo,
/// which operates on a GPRPair.  Pushes three results: the low i32 half of
/// the loaded value, the high i32 half, and the output chain.
static void ReplaceCMP_SWAP_64Results(SDNode *N,
                                       SmallVectorImpl<SDValue> & Results,
                                       SelectionDAG &DAG) {
  assert(N->getValueType(0) == MVT::i64 &&
         "AtomicCmpSwap on types less than 64 should be legal");
  // Pseudo operands: pointer, expected value (GPRPair), new value (GPRPair),
  // chain.
  SDValue Ops[] = {N->getOperand(1),
                   createGPRPairNode(DAG, N->getOperand(2)),
                   createGPRPairNode(DAG, N->getOperand(3)),
                   N->getOperand(0)};
  SDNode *CmpSwap = DAG.getMachineNode(
      ARM::CMP_SWAP_64, SDLoc(N),
      DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);

  // Carry the original memory operand over to the machine node so later
  // passes keep the memory access information.
  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
  DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});

  bool isBigEndian = DAG.getDataLayout().isBigEndian();

  // Extract the pair's subregisters in the order that reassembles the
  // original i64 for this endianness (low half first).
  Results.push_back(
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)));
  Results.push_back(
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)));
  Results.push_back(SDValue(CmpSwap, 2));
}
8254 
/// Custom-lower ISD::FPOWI (pow with an integer exponent) for MSVCRT targets
/// by converting the exponent to floating point and calling powf/pow,
/// emitting the call as a tail call when the node is in tail-call position.
static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget,
                          SelectionDAG &DAG) {
  const auto &TLI = DAG.getTargetLoweringInfo();

  assert(Subtarget.getTargetTriple().isOSMSVCRT() &&
         "Custom lowering is MSVCRT specific!");

  SDLoc dl(Op);
  SDValue Val = Op.getOperand(0);
  MVT Ty = Val->getSimpleValueType(0);
  // Convert the integer exponent to the same FP type as the base.
  SDValue Exponent = DAG.getNode(ISD::SINT_TO_FP, dl, Ty, Op.getOperand(1));
  SDValue Callee = DAG.getExternalSymbol(Ty == MVT::f32 ? "powf" : "pow",
                                         TLI.getPointerTy(DAG.getDataLayout()));

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Node = Val;
  Entry.Ty = Val.getValueType().getTypeForEVT(*DAG.getContext());
  Entry.IsZExt = true;
  Args.push_back(Entry);

  Entry.Node = Exponent;
  Entry.Ty = Exponent.getValueType().getTypeForEVT(*DAG.getContext());
  Entry.IsZExt = true;
  Args.push_back(Entry);

  Type *LCRTy = Val.getValueType().getTypeForEVT(*DAG.getContext());

  // The in-chain to the call is the entry node.  If we are emitting a
  // tailcall, the chain will be mutated if the node has a non-entry input
  // chain.
  SDValue InChain = DAG.getEntryNode();
  SDValue TCChain = InChain;

  // Tail-call only if the node is in tail position and the caller returns
  // the same type the libcall produces.
  const Function &F = DAG.getMachineFunction().getFunction();
  bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
              F.getReturnType() == LCRTy;
  if (IsTC)
    InChain = TCChain;

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setCallee(CallingConv::ARM_AAPCS_VFP, LCRTy, Callee, std::move(Args))
      .setTailCall(IsTC);
  std::pair<SDValue, SDValue> CI = TLI.LowerCallTo(CLI);

  // Return the chain (the DAG root) if it is a tail call
  return !CI.second.getNode() ? DAG.getRoot() : CI.first;
}
8306 
/// Central dispatch for operations this backend marked as Custom during type
/// legalization: forwards each opcode to its dedicated lowering routine.
/// Reaching the default case means an opcode was marked Custom without a
/// handler here, which is a bug.
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:        return LowerSELECT(Op, DAG);
  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
  case ISD::BRCOND:        return LowerBRCOND(Op, DAG);
  case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
  case ISD::VASTART:       return LowerVASTART(Op, DAG);
  case ISD::ATOMIC_FENCE:  return LowerATOMIC_FENCE(Op, DAG, Subtarget);
  case ISD::PREFETCH:      return LowerPREFETCH(Op, DAG, Subtarget);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
  case ISD::RETURNADDR:    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
  case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
                                                               Subtarget);
  case ISD::BITCAST:       return ExpandBITCAST(Op.getNode(), DAG, Subtarget);
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
  case ISD::SREM:          return LowerREM(Op.getNode(), DAG);
  case ISD::UREM:          return LowerREM(Op.getNode(), DAG);
  case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
  case ISD::SRL_PARTS:
  case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
  case ISD::CTPOP:         return LowerCTPOP(Op.getNode(), DAG, Subtarget);
  case ISD::SETCC:         return LowerVSETCC(Op, DAG);
  case ISD::SETCCCARRY:    return LowerSETCCCARRY(Op, DAG);
  case ISD::ConstantFP:    return LowerConstantFP(Op, DAG, Subtarget);
  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG, Subtarget);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::FLT_ROUNDS_:   return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::MUL:           return LowerMUL(Op, DAG);
  // Windows scalar divisions go through the __rt_*div runtime helpers.
  case ISD::SDIV:
    if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
      return LowerDIV_Windows(Op, DAG, /* Signed */ true);
    return LowerSDIV(Op, DAG);
  case ISD::UDIV:
    if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
      return LowerDIV_Windows(Op, DAG, /* Signed */ false);
    return LowerUDIV(Op, DAG);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:      return LowerADDSUBCARRY(Op, DAG);
  case ISD::SADDO:
  case ISD::SSUBO:
    return LowerSignedALUO(Op, DAG);
  case ISD::UADDO:
  case ISD::USUBO:
    return LowerUnsignedALUO(Op, DAG);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:  return LowerAtomicLoadStore(Op, DAG);
  case ISD::FSINCOS:       return LowerFSINCOS(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM:       return LowerDivRem(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    if (Subtarget->isTargetWindows())
      return LowerDYNAMIC_STACKALLOC(Op, DAG);
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::FPOWI: return LowerFPOWI(Op, *Subtarget, DAG);
  // WIN__DBZCHK itself needs no further lowering.
  case ARMISD::WIN__DBZCHK: return SDValue();
  }
}
8389 
8390 static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
8391                                  SelectionDAG &DAG) {
8392   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
8393   unsigned Opc = 0;
8394   if (IntNo == Intrinsic::arm_smlald)
8395     Opc = ARMISD::SMLALD;
8396   else if (IntNo == Intrinsic::arm_smlaldx)
8397     Opc = ARMISD::SMLALDX;
8398   else if (IntNo == Intrinsic::arm_smlsld)
8399     Opc = ARMISD::SMLSLD;
8400   else if (IntNo == Intrinsic::arm_smlsldx)
8401     Opc = ARMISD::SMLSLDX;
8402   else
8403     return;
8404 
8405   SDLoc dl(N);
8406   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
8407                            N->getOperand(3),
8408                            DAG.getConstant(0, dl, MVT::i32));
8409   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
8410                            N->getOperand(3),
8411                            DAG.getConstant(1, dl, MVT::i32));
8412 
8413   SDValue LongMul = DAG.getNode(Opc, dl,
8414                                 DAG.getVTList(MVT::i32, MVT::i32),
8415                                 N->getOperand(1), N->getOperand(2),
8416                                 Lo, Hi);
8417   Results.push_back(LongMul.getValue(0));
8418   Results.push_back(LongMul.getValue(1));
8419 }
8420 
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.  Handlers either push
/// results directly and return, or set Res, which is pushed once at the end
/// if non-null.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDValue Res;
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::READ_REGISTER:
    ExpandREAD_REGISTER(N, Results, DAG);
    break;
  case ISD::BITCAST:
    Res = ExpandBITCAST(N, DAG, Subtarget);
    break;
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SHL:
    Res = Expand64BitShift(N, DAG, Subtarget);
    break;
  case ISD::SREM:
  case ISD::UREM:
    Res = LowerREM(N, DAG);
    break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    // DivRem yields both the quotient and the remainder.
    Res = LowerDivRem(SDValue(N, 0), DAG);
    assert(Res.getNumOperands() == 2 && "DivRem needs two values");
    Results.push_back(Res.getValue(0));
    Results.push_back(Res.getValue(1));
    return;
  case ISD::READCYCLECOUNTER:
    ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
    return;
  case ISD::UDIV:
  case ISD::SDIV:
    assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
    return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
                             Results);
  case ISD::ATOMIC_CMP_SWAP:
    ReplaceCMP_SWAP_64Results(N, Results, DAG);
    return;
  case ISD::INTRINSIC_WO_CHAIN:
    return ReplaceLongIntrinsic(N, Results, DAG);
  case ISD::ABS:
     lowerABS(N, Results, DAG);
     return ;

  }
  if (Res.getNode())
    Results.push_back(Res);
}
8473 
8474 //===----------------------------------------------------------------------===//
8475 //                           ARM Scheduler Hooks
8476 //===----------------------------------------------------------------------===//
8477 
/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
/// registers the function context.
///
/// Loads the address of \p DispatchBB from a constant-pool entry, materializes
/// it PC-relatively, and stores it into the jump buffer slot at frame index
/// \p FI + 36 (&jbuf[1], the saved PC). Three code sequences are emitted
/// depending on the instruction set: Thumb2, Thumb1, or ARM.
void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
                                               MachineBasicBlock *MBB,
                                               MachineBasicBlock *DispatchBB,
                                               int FI) const {
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported with SjLj");
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineConstantPool *MCP = MF->getConstantPool();
  ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
  const Function &F = MF->getFunction();

  bool isThumb = Subtarget->isThumb();
  bool isThumb2 = Subtarget->isThumb2();

  // Create a constant-pool entry holding DispatchBB's address, adjusted for
  // the PC-read offset of the ISA (Thumb reads PC+4, ARM reads PC+8).
  unsigned PCLabelId = AFI->createPICLabelUId();
  unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
  ARMConstantPoolValue *CPV =
    ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj);
  unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);

  const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;

  // Grab constant pool and fixed stack memory operands.
  MachineMemOperand *CPMMO =
      MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                               MachineMemOperand::MOLoad, 4, 4);

  MachineMemOperand *FIMMOSt =
      MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                               MachineMemOperand::MOStore, 4, 4);

  // Load the address of the dispatch MBB into the jump buffer.
  if (isThumb2) {
    // Incoming value: jbuf
    //   ldr.n  r5, LCPI1_1
    //   orr    r5, r5, #1
    //   add    r5, pc
    //   str    r5, [$jbuf, #+4] ; &jbuf[1]
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    // Set the low bit because of thumb mode.
    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(0x01)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    // Resolve the PIC label: add the current PC to the loaded offset.
    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
      .addReg(NewVReg2, RegState::Kill)
      .addImm(PCLabelId);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
        .addReg(NewVReg3, RegState::Kill)
        .addFrameIndex(FI)
        .addImm(36) // &jbuf[1] :: pc
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  } else if (isThumb) {
    // Incoming value: jbuf
    //   ldr.n  r1, LCPI1_4
    //   add    r1, pc
    //   mov    r2, #1
    //   orrs   r1, r2
    //   add    r2, $jbuf, #+4 ; &jbuf[1]
    //   str    r1, [r2]
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
      .addReg(NewVReg1, RegState::Kill)
      .addImm(PCLabelId);
    // Set the low bit because of thumb mode.
    // Thumb1 tORR has no immediate form, so materialize #1 in a register
    // first; both instructions clobber CPSR (flag-setting forms).
    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
        .addReg(ARM::CPSR, RegState::Define)
        .addImm(1)
        .add(predOps(ARMCC::AL));
    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg2, RegState::Kill)
        .addReg(NewVReg3, RegState::Kill)
        .add(predOps(ARMCC::AL));
    // Thumb1 tSTRi cannot address a frame index directly; compute the slot
    // address into a register first.
    unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
            .addFrameIndex(FI)
            .addImm(36); // &jbuf[1] :: pc
    BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
        .addReg(NewVReg4, RegState::Kill)
        .addReg(NewVReg5, RegState::Kill)
        .addImm(0)
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  } else {
    // Incoming value: jbuf
    //   ldr  r1, LCPI1_1
    //   add  r1, pc, r1
    //   str  r1, [$jbuf, #+4] ; &jbuf[1]
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addImm(0)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(PCLabelId)
        .add(predOps(ARMCC::AL));
    BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
        .addReg(NewVReg2, RegState::Kill)
        .addFrameIndex(FI)
        .addImm(36) // &jbuf[1] :: pc
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  }
}
8607 
/// Build the SjLj exception dispatch block for this function.
///
/// Collects every landing pad, builds an inline jump table indexed by the
/// call-site number stored in the function context (frame index FI, offset 4),
/// and emits a dispatch block that range-checks the index (trapping if out of
/// range) and jumps through the table. All invoke blocks are rewired to
/// branch to the new dispatch block instead of their former landing pads.
void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
                                              MachineBasicBlock *MBB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineFrameInfo &MFI = MF->getFrameInfo();
  // Frame slot holding the SjLj function context.
  int FI = MFI.getFunctionContextIndex();

  const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
                                                        : &ARM::GPRnopcRegClass;

  // Get a mapping of the call site numbers to all of the landing pads they're
  // associated with.
  DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad;
  unsigned MaxCSNum = 0;
  for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
       ++BB) {
    if (!BB->isEHPad()) continue;

    // FIXME: We should assert that the EH_LABEL is the first MI in the landing
    // pad.
    for (MachineBasicBlock::iterator
           II = BB->begin(), IE = BB->end(); II != IE; ++II) {
      if (!II->isEHLabel()) continue;

      MCSymbol *Sym = II->getOperand(0).getMCSymbol();
      if (!MF->hasCallSiteLandingPad(Sym)) continue;

      SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
      for (SmallVectorImpl<unsigned>::iterator
             CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
           CSI != CSE; ++CSI) {
        CallSiteNumToLPad[*CSI].push_back(&*BB);
        MaxCSNum = std::max(MaxCSNum, *CSI);
      }
      break;
    }
  }

  // Get an ordered list of the machine basic blocks for the jump table.
  // Call-site numbers start at 1, so entry I of the table corresponds to
  // call-site number I+1. Also collect all predecessors of the landing pads
  // (the invoke blocks) so they can be rewired below.
  std::vector<MachineBasicBlock*> LPadList;
  SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
  LPadList.reserve(CallSiteNumToLPad.size());
  for (unsigned I = 1; I <= MaxCSNum; ++I) {
    SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
    for (SmallVectorImpl<MachineBasicBlock*>::iterator
           II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
      LPadList.push_back(*II);
      InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
    }
  }

  assert(!LPadList.empty() &&
         "No landing pad destinations for the dispatch jump table!");

  // Create the jump table and associated information.
  MachineJumpTableInfo *JTI =
    MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
  unsigned MJTI = JTI->createJumpTableIndex(LPadList);

  // Create the MBBs for the dispatch code.

  // Shove the dispatch's address into the return slot in the function context.
  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
  DispatchBB->setIsEHPad();

  // TrapBB is reached when the call-site index is out of range.
  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  unsigned trap_opcode;
  if (Subtarget->isThumb())
    trap_opcode = ARM::tTRAP;
  else
    trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;

  BuildMI(TrapBB, dl, TII->get(trap_opcode));
  DispatchBB->addSuccessor(TrapBB);

  // DispContBB holds the in-range path: jump-table address computation and
  // the indirect branch.
  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
  DispatchBB->addSuccessor(DispContBB);

  // Add the new MBBs to the function.
  MF->insert(MF->end(), DispatchBB);
  MF->insert(MF->end(), DispContBB);
  MF->insert(MF->end(), TrapBB);

  // Insert code into the entry block that creates and registers the function
  // context.
  SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);

  // The call-site index in the function context may be written by the
  // unwinder, hence MOVolatile.
  MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI),
      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4);

  MachineInstrBuilder MIB;
  MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));

  const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
  const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();

  // Add a register mask with no preserved registers.  This results in all
  // registers being marked as clobbered. This can't work if the dispatch block
  // is in a Thumb1 function and is linked with ARM code which uses the FP
  // registers, as there is no way to preserve the FP registers in Thumb1 mode.
  MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));

  bool IsPositionIndependent = isPositionIndependent();
  unsigned NumLPads = LPadList.size();
  if (Subtarget->isThumb2()) {
    // Thumb2: load the call-site index, compare against NumLPads (immediate
    // if it fits in 8 bits, else movw/movt into a register), trap if HI,
    // then compute table entry address and branch through the table.
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
        .addFrameIndex(FI)
        .addImm(4)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
          .addReg(NewVReg1)
          .addImm(LPadList.size())
          .add(predOps(ARMCC::AL));
    } else {
      unsigned VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
          .addImm(NumLPads & 0xFFFF)
          .add(predOps(ARMCC::AL));

      unsigned VReg2 = VReg1;
      if ((NumLPads & 0xFFFF0000) != 0) {
        VReg2 = MRI->createVirtualRegister(TRC);
        BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
            .addReg(VReg1)
            .addImm(NumLPads >> 16)
            .add(predOps(ARMCC::AL));
      }

      BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg2)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::HI)
      .addReg(ARM::CPSR);

    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    // Entry address = table base + index * 4 (lsl #2).
    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
        .addReg(NewVReg3, RegState::Kill)
        .addReg(NewVReg1)
        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
      .addReg(NewVReg4, RegState::Kill)
      .addReg(NewVReg1)
      .addJumpTableIndex(MJTI);
  } else if (Subtarget->isThumb()) {
    // Thumb1: same structure, but comparisons >= 256 must go through a
    // constant-pool load, and the table entry must be loaded explicitly
    // (with an extra base add when position independent).
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
        .addFrameIndex(FI)
        .addImm(1)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
          .addReg(NewVReg1)
          .addImm(NumLPads)
          .add(predOps(ARMCC::AL));
    } else {
      MachineConstantPool *ConstantPool = MF->getConstantPool();
      Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);

      // MachineConstantPool wants an explicit alignment.
      unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
      if (Align == 0)
        Align = MF->getDataLayout().getTypeAllocSize(C->getType());
      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);

      unsigned VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
          .addReg(VReg1, RegState::Define)
          .addConstantPoolIndex(Idx)
          .add(predOps(ARMCC::AL));
      BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
          .addReg(NewVReg1)
          .addReg(VReg1)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::HI)
      .addReg(ARM::CPSR);

    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg1)
        .addImm(2)
        .add(predOps(ARMCC::AL));

    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg2, RegState::Kill)
        .addReg(NewVReg3)
        .add(predOps(ARMCC::AL));

    MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
        MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);

    unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
        .addReg(NewVReg4, RegState::Kill)
        .addImm(0)
        .addMemOperand(JTMMOLd)
        .add(predOps(ARMCC::AL));

    // PIC jump tables store offsets from the table base, so add it back.
    unsigned NewVReg6 = NewVReg5;
    if (IsPositionIndependent) {
      NewVReg6 = MRI->createVirtualRegister(TRC);
      BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
          .addReg(ARM::CPSR, RegState::Define)
          .addReg(NewVReg5, RegState::Kill)
          .addReg(NewVReg3)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
      .addReg(NewVReg6, RegState::Kill)
      .addJumpTableIndex(MJTI);
  } else {
    // ARM: same structure; the movw/movt compare needs v6T2, otherwise fall
    // back to a constant-pool load for large NumLPads.
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
        .addFrameIndex(FI)
        .addImm(4)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
          .addReg(NewVReg1)
          .addImm(NumLPads)
          .add(predOps(ARMCC::AL));
    } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
      unsigned VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
          .addImm(NumLPads & 0xFFFF)
          .add(predOps(ARMCC::AL));

      unsigned VReg2 = VReg1;
      if ((NumLPads & 0xFFFF0000) != 0) {
        VReg2 = MRI->createVirtualRegister(TRC);
        BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
            .addReg(VReg1)
            .addImm(NumLPads >> 16)
            .add(predOps(ARMCC::AL));
      }

      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg2)
          .add(predOps(ARMCC::AL));
    } else {
      MachineConstantPool *ConstantPool = MF->getConstantPool();
      Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);

      // MachineConstantPool wants an explicit alignment.
      unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
      if (Align == 0)
        Align = MF->getDataLayout().getTypeAllocSize(C->getType());
      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);

      unsigned VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
          .addReg(VReg1, RegState::Define)
          .addConstantPoolIndex(Idx)
          .addImm(0)
          .add(predOps(ARMCC::AL));
      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg1, RegState::Kill)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::HI)
      .addReg(ARM::CPSR);

    // Scale the index by 4 (table entries are 4 bytes each).
    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
        .addReg(NewVReg1)
        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
        MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
    unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
        .addReg(NewVReg3, RegState::Kill)
        .addReg(NewVReg4)
        .addImm(0)
        .addMemOperand(JTMMOLd)
        .add(predOps(ARMCC::AL));

    if (IsPositionIndependent) {
      // PIC tables hold offsets; BR_JTadd adds the table base back.
      BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
        .addReg(NewVReg5, RegState::Kill)
        .addReg(NewVReg4)
        .addJumpTableIndex(MJTI);
    } else {
      BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
        .addReg(NewVReg5, RegState::Kill)
        .addJumpTableIndex(MJTI);
    }
  }

  // Add the jump table entries as successors to the MBB.
  SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
  for (std::vector<MachineBasicBlock*>::iterator
         I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
    MachineBasicBlock *CurMBB = *I;
    if (SeenMBBs.insert(CurMBB).second)
      DispContBB->addSuccessor(CurMBB);
  }

  // N.B. the order the invoke BBs are processed in doesn't matter here.
  const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
  SmallVector<MachineBasicBlock*, 64> MBBLPads;
  for (MachineBasicBlock *BB : InvokeBBs) {

    // Remove the landing pad successor from the invoke block and replace it
    // with the new dispatch block.
    SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
                                                  BB->succ_end());
    while (!Successors.empty()) {
      MachineBasicBlock *SMBB = Successors.pop_back_val();
      if (SMBB->isEHPad()) {
        BB->removeSuccessor(SMBB);
        MBBLPads.push_back(SMBB);
      }
    }

    BB->addSuccessor(DispatchBB, BranchProbability::getZero());
    BB->normalizeSuccProbs();

    // Find the invoke call and mark all of the callee-saved registers as
    // 'implicit defined' so that they're spilled. This prevents code from
    // moving instructions to before the EH block, where they will never be
    // executed.
    for (MachineBasicBlock::reverse_iterator
           II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
      if (!II->isCall()) continue;

      // Record registers the call already mentions so we don't add
      // duplicate implicit defs.
      DenseMap<unsigned, bool> DefRegs;
      for (MachineInstr::mop_iterator
             OI = II->operands_begin(), OE = II->operands_end();
           OI != OE; ++OI) {
        if (!OI->isReg()) continue;
        DefRegs[OI->getReg()] = true;
      }

      MachineInstrBuilder MIB(*MF, &*II);

      for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
        unsigned Reg = SavedRegs[i];
        // Only add registers the current instruction set can actually
        // access.
        if (Subtarget->isThumb2() &&
            !ARM::tGPRRegClass.contains(Reg) &&
            !ARM::hGPRRegClass.contains(Reg))
          continue;
        if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
          continue;
        if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
          continue;
        if (!DefRegs[Reg])
          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
      }

      break;
    }
  }

  // Mark all former landing pads as non-landing pads. The dispatch is the only
  // landing pad now.
  for (SmallVectorImpl<MachineBasicBlock*>::iterator
         I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
    (*I)->setIsEHPad(false);

  // The instruction is gone now.
  MI.eraseFromParent();
}
9020 
9021 static
9022 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
9023   for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
9024        E = MBB->succ_end(); I != E; ++I)
9025     if (*I != Succ)
9026       return *I;
9027   llvm_unreachable("Expecting a BB with two successors!");
9028 }
9029 
9030 /// Return the load opcode for a given load size. If load size >= 8,
9031 /// neon opcode will be returned.
9032 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
9033   if (LdSize >= 8)
9034     return LdSize == 16 ? ARM::VLD1q32wb_fixed
9035                         : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
9036   if (IsThumb1)
9037     return LdSize == 4 ? ARM::tLDRi
9038                        : LdSize == 2 ? ARM::tLDRHi
9039                                      : LdSize == 1 ? ARM::tLDRBi : 0;
9040   if (IsThumb2)
9041     return LdSize == 4 ? ARM::t2LDR_POST
9042                        : LdSize == 2 ? ARM::t2LDRH_POST
9043                                      : LdSize == 1 ? ARM::t2LDRB_POST : 0;
9044   return LdSize == 4 ? ARM::LDR_POST_IMM
9045                      : LdSize == 2 ? ARM::LDRH_POST
9046                                    : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
9047 }
9048 
9049 /// Return the store opcode for a given store size. If store size >= 8,
9050 /// neon opcode will be returned.
9051 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
9052   if (StSize >= 8)
9053     return StSize == 16 ? ARM::VST1q32wb_fixed
9054                         : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
9055   if (IsThumb1)
9056     return StSize == 4 ? ARM::tSTRi
9057                        : StSize == 2 ? ARM::tSTRHi
9058                                      : StSize == 1 ? ARM::tSTRBi : 0;
9059   if (IsThumb2)
9060     return StSize == 4 ? ARM::t2STR_POST
9061                        : StSize == 2 ? ARM::t2STRH_POST
9062                                      : StSize == 1 ? ARM::t2STRB_POST : 0;
9063   return StSize == 4 ? ARM::STR_POST_IMM
9064                      : StSize == 2 ? ARM::STRH_POST
9065                                    : StSize == 1 ? ARM::STRB_POST_IMM : 0;
9066 }
9067 
/// Emit a post-increment load operation with given size. The instructions
/// will be added to BB at Pos.
///
/// Loads \p LdSize bytes from \p AddrIn into \p Data and defines \p AddrOut
/// as the incremented address. The operand layout differs per instruction
/// set, hence the four emission paths below.
static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
                       const TargetInstrInfo *TII, const DebugLoc &dl,
                       unsigned LdSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
  assert(LdOpc != 0 && "Should have a load opcode");
  if (LdSize >= 8) {
    // NEON VLD1 with fixed writeback: AddrOut is the written-back address.
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb1) {
    // load + update AddrIn
    // Thumb1 has no post-increment form: do a plain load, then a separate
    // flag-setting tADDi8 to produce the incremented address.
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
    BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
        .add(t1CondCodeOp())
        .addReg(AddrIn)
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb2) {
    // Thumb2 post-increment load: immediate offset is the increment.
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  } else { // arm
    // ARM post-increment load; the extra .addReg(0) is the (unused) offset
    // register operand of the addressing mode.
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addReg(0)
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  }
}
9108 
/// Emit a post-increment store operation with given size. The instructions
/// will be added to BB at Pos.
///
/// Stores \p StSize bytes from \p Data to \p AddrIn and defines \p AddrOut
/// as the incremented address. Mirrors emitPostLd's per-ISA operand layouts.
static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
                       const TargetInstrInfo *TII, const DebugLoc &dl,
                       unsigned StSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
  assert(StOpc != 0 && "Should have a store opcode");
  if (StSize >= 8) {
    // NEON VST1 with fixed writeback: AddrOut is the written-back address.
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(AddrIn)
        .addImm(0)
        .addReg(Data)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb1) {
    // store + update AddrIn
    // Thumb1 has no post-increment form: do a plain store, then a separate
    // flag-setting tADDi8 to produce the incremented address.
    BuildMI(*BB, Pos, dl, TII->get(StOpc))
        .addReg(Data)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
    BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
        .add(t1CondCodeOp())
        .addReg(AddrIn)
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb2) {
    // Thumb2 post-increment store: immediate offset is the increment.
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(Data)
        .addReg(AddrIn)
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  } else { // arm
    // ARM post-increment store; the extra .addReg(0) is the (unused) offset
    // register operand of the addressing mode.
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(Data)
        .addReg(AddrIn)
        .addReg(0)
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  }
}
9150 
9151 MachineBasicBlock *
9152 ARMTargetLowering::EmitStructByval(MachineInstr &MI,
9153                                    MachineBasicBlock *BB) const {
9154   // This pseudo instruction has 3 operands: dst, src, size
9155   // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
9156   // Otherwise, we will generate unrolled scalar copies.
9157   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
9158   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9159   MachineFunction::iterator It = ++BB->getIterator();
9160 
9161   unsigned dest = MI.getOperand(0).getReg();
9162   unsigned src = MI.getOperand(1).getReg();
9163   unsigned SizeVal = MI.getOperand(2).getImm();
9164   unsigned Align = MI.getOperand(3).getImm();
9165   DebugLoc dl = MI.getDebugLoc();
9166 
9167   MachineFunction *MF = BB->getParent();
9168   MachineRegisterInfo &MRI = MF->getRegInfo();
9169   unsigned UnitSize = 0;
9170   const TargetRegisterClass *TRC = nullptr;
9171   const TargetRegisterClass *VecTRC = nullptr;
9172 
9173   bool IsThumb1 = Subtarget->isThumb1Only();
9174   bool IsThumb2 = Subtarget->isThumb2();
9175   bool IsThumb = Subtarget->isThumb();
9176 
9177   if (Align & 1) {
9178     UnitSize = 1;
9179   } else if (Align & 2) {
9180     UnitSize = 2;
9181   } else {
9182     // Check whether we can use NEON instructions.
9183     if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
9184         Subtarget->hasNEON()) {
9185       if ((Align % 16 == 0) && SizeVal >= 16)
9186         UnitSize = 16;
9187       else if ((Align % 8 == 0) && SizeVal >= 8)
9188         UnitSize = 8;
9189     }
9190     // Can't use NEON instructions.
9191     if (UnitSize == 0)
9192       UnitSize = 4;
9193   }
9194 
9195   // Select the correct opcode and register class for unit size load/store
9196   bool IsNeon = UnitSize >= 8;
9197   TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
9198   if (IsNeon)
9199     VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
9200                             : UnitSize == 8 ? &ARM::DPRRegClass
9201                                             : nullptr;
9202 
9203   unsigned BytesLeft = SizeVal % UnitSize;
9204   unsigned LoopSize = SizeVal - BytesLeft;
9205 
9206   if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
9207     // Use LDR and STR to copy.
9208     // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
9209     // [destOut] = STR_POST(scratch, destIn, UnitSize)
9210     unsigned srcIn = src;
9211     unsigned destIn = dest;
9212     for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
9213       unsigned srcOut = MRI.createVirtualRegister(TRC);
9214       unsigned destOut = MRI.createVirtualRegister(TRC);
9215       unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
9216       emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
9217                  IsThumb1, IsThumb2);
9218       emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
9219                  IsThumb1, IsThumb2);
9220       srcIn = srcOut;
9221       destIn = destOut;
9222     }
9223 
9224     // Handle the leftover bytes with LDRB and STRB.
9225     // [scratch, srcOut] = LDRB_POST(srcIn, 1)
9226     // [destOut] = STRB_POST(scratch, destIn, 1)
9227     for (unsigned i = 0; i < BytesLeft; i++) {
9228       unsigned srcOut = MRI.createVirtualRegister(TRC);
9229       unsigned destOut = MRI.createVirtualRegister(TRC);
9230       unsigned scratch = MRI.createVirtualRegister(TRC);
9231       emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
9232                  IsThumb1, IsThumb2);
9233       emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
9234                  IsThumb1, IsThumb2);
9235       srcIn = srcOut;
9236       destIn = destOut;
9237     }
9238     MI.eraseFromParent(); // The instruction is gone now.
9239     return BB;
9240   }
9241 
9242   // Expand the pseudo op to a loop.
9243   // thisMBB:
9244   //   ...
9245   //   movw varEnd, # --> with thumb2
9246   //   movt varEnd, #
9247   //   ldrcp varEnd, idx --> without thumb2
9248   //   fallthrough --> loopMBB
9249   // loopMBB:
9250   //   PHI varPhi, varEnd, varLoop
9251   //   PHI srcPhi, src, srcLoop
9252   //   PHI destPhi, dst, destLoop
9253   //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
9254   //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
9255   //   subs varLoop, varPhi, #UnitSize
9256   //   bne loopMBB
9257   //   fallthrough --> exitMBB
9258   // exitMBB:
9259   //   epilogue to handle left-over bytes
9260   //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
9261   //   [destOut] = STRB_POST(scratch, destLoop, 1)
9262   MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
9263   MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
9264   MF->insert(It, loopMBB);
9265   MF->insert(It, exitMBB);
9266 
9267   // Transfer the remainder of BB and its successor edges to exitMBB.
9268   exitMBB->splice(exitMBB->begin(), BB,
9269                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9270   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
9271 
9272   // Load an immediate to varEnd.
9273   unsigned varEnd = MRI.createVirtualRegister(TRC);
9274   if (Subtarget->useMovt()) {
9275     unsigned Vtmp = varEnd;
9276     if ((LoopSize & 0xFFFF0000) != 0)
9277       Vtmp = MRI.createVirtualRegister(TRC);
9278     BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
9279         .addImm(LoopSize & 0xFFFF)
9280         .add(predOps(ARMCC::AL));
9281 
9282     if ((LoopSize & 0xFFFF0000) != 0)
9283       BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
9284           .addReg(Vtmp)
9285           .addImm(LoopSize >> 16)
9286           .add(predOps(ARMCC::AL));
9287   } else {
9288     MachineConstantPool *ConstantPool = MF->getConstantPool();
9289     Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
9290     const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
9291 
9292     // MachineConstantPool wants an explicit alignment.
9293     unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
9294     if (Align == 0)
9295       Align = MF->getDataLayout().getTypeAllocSize(C->getType());
9296     unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
9297     MachineMemOperand *CPMMO =
9298         MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
9299                                  MachineMemOperand::MOLoad, 4, 4);
9300 
9301     if (IsThumb)
9302       BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
9303           .addReg(varEnd, RegState::Define)
9304           .addConstantPoolIndex(Idx)
9305           .add(predOps(ARMCC::AL))
9306           .addMemOperand(CPMMO);
9307     else
9308       BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
9309           .addReg(varEnd, RegState::Define)
9310           .addConstantPoolIndex(Idx)
9311           .addImm(0)
9312           .add(predOps(ARMCC::AL))
9313           .addMemOperand(CPMMO);
9314   }
9315   BB->addSuccessor(loopMBB);
9316 
9317   // Generate the loop body:
9318   //   varPhi = PHI(varLoop, varEnd)
9319   //   srcPhi = PHI(srcLoop, src)
9320   //   destPhi = PHI(destLoop, dst)
9321   MachineBasicBlock *entryBB = BB;
9322   BB = loopMBB;
9323   unsigned varLoop = MRI.createVirtualRegister(TRC);
9324   unsigned varPhi = MRI.createVirtualRegister(TRC);
9325   unsigned srcLoop = MRI.createVirtualRegister(TRC);
9326   unsigned srcPhi = MRI.createVirtualRegister(TRC);
9327   unsigned destLoop = MRI.createVirtualRegister(TRC);
9328   unsigned destPhi = MRI.createVirtualRegister(TRC);
9329 
9330   BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
9331     .addReg(varLoop).addMBB(loopMBB)
9332     .addReg(varEnd).addMBB(entryBB);
9333   BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
9334     .addReg(srcLoop).addMBB(loopMBB)
9335     .addReg(src).addMBB(entryBB);
9336   BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
9337     .addReg(destLoop).addMBB(loopMBB)
9338     .addReg(dest).addMBB(entryBB);
9339 
9340   //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
9341   //   [destLoop] = STR_POST(scratch, destPhi, UnitSiz)
9342   unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
9343   emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
9344              IsThumb1, IsThumb2);
9345   emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
9346              IsThumb1, IsThumb2);
9347 
9348   // Decrement loop variable by UnitSize.
9349   if (IsThumb1) {
9350     BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop)
9351         .add(t1CondCodeOp())
9352         .addReg(varPhi)
9353         .addImm(UnitSize)
9354         .add(predOps(ARMCC::AL));
9355   } else {
9356     MachineInstrBuilder MIB =
9357         BuildMI(*BB, BB->end(), dl,
9358                 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
9359     MIB.addReg(varPhi)
9360         .addImm(UnitSize)
9361         .add(predOps(ARMCC::AL))
9362         .add(condCodeOp());
9363     MIB->getOperand(5).setReg(ARM::CPSR);
9364     MIB->getOperand(5).setIsDef(true);
9365   }
9366   BuildMI(*BB, BB->end(), dl,
9367           TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
9368       .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
9369 
9370   // loopMBB can loop back to loopMBB or fall through to exitMBB.
9371   BB->addSuccessor(loopMBB);
9372   BB->addSuccessor(exitMBB);
9373 
9374   // Add epilogue to handle BytesLeft.
9375   BB = exitMBB;
9376   auto StartOfExit = exitMBB->begin();
9377 
9378   //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
9379   //   [destOut] = STRB_POST(scratch, destLoop, 1)
9380   unsigned srcIn = srcLoop;
9381   unsigned destIn = destLoop;
9382   for (unsigned i = 0; i < BytesLeft; i++) {
9383     unsigned srcOut = MRI.createVirtualRegister(TRC);
9384     unsigned destOut = MRI.createVirtualRegister(TRC);
9385     unsigned scratch = MRI.createVirtualRegister(TRC);
9386     emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
9387                IsThumb1, IsThumb2);
9388     emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
9389                IsThumb1, IsThumb2);
9390     srcIn = srcOut;
9391     destIn = destOut;
9392   }
9393 
9394   MI.eraseFromParent(); // The instruction is gone now.
9395   return BB;
9396 }
9397 
/// Lower the WIN__CHKSTK pseudo: call the Windows stack-probe helper
/// __chkstk (allocation size in words passed in R4, byte adjustment
/// returned in R4) and then subtract the returned adjustment from SP.
MachineBasicBlock *
ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  const TargetMachine &TM = getTargetMachine();
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(Subtarget->isTargetWindows() &&
         "__chkstk is only supported on Windows");
  assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");

  // __chkstk takes the number of words to allocate on the stack in R4, and
  // returns the stack adjustment in number of bytes in R4.  This will not
  // clobber any other registers (other than the obvious lr).
  //
  // Although, technically, IP should be considered a register which may be
  // clobbered, the call itself will not touch it.  Windows on ARM is a pure
  // thumb-2 environment, so there is no interworking required.  As a result, we
  // do not expect a veneer to be emitted by the linker, clobbering IP.
  //
  // Each module receives its own copy of __chkstk, so no import thunk is
  // required, again, ensuring that IP is not clobbered.
  //
  // Finally, although some linkers may theoretically provide a trampoline for
  // out of range calls (which is quite common due to a 32M range limitation of
  // branches for Thumb), we can generate the long-call version via
  // -mcmodel=large, alleviating the need for the trampoline which may clobber
  // IP.

  switch (TM.getCodeModel()) {
  case CodeModel::Tiny:
    llvm_unreachable("Tiny code model not available on ARM.");
  case CodeModel::Small:
  case CodeModel::Medium:
  case CodeModel::Kernel:
    // Near code models: a direct BL reaches __chkstk.  R4 is both an
    // implicit argument (killed) and the implicit result (defined).
    BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
        .add(predOps(ARMCC::AL))
        .addExternalSymbol("__chkstk")
        .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
        .addReg(ARM::R4, RegState::Implicit | RegState::Define)
        .addReg(ARM::R12,
                RegState::Implicit | RegState::Define | RegState::Dead)
        .addReg(ARM::CPSR,
                RegState::Implicit | RegState::Define | RegState::Dead);
    break;
  case CodeModel::Large: {
    // Large code model: materialize the address of __chkstk into a register
    // and call through it so the call has unlimited range.
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);

    BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
      .addExternalSymbol("__chkstk");
    BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr))
        .add(predOps(ARMCC::AL))
        .addReg(Reg, RegState::Kill)
        .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
        .addReg(ARM::R4, RegState::Implicit | RegState::Define)
        .addReg(ARM::R12,
                RegState::Implicit | RegState::Define | RegState::Dead)
        .addReg(ARM::CPSR,
                RegState::Implicit | RegState::Define | RegState::Dead);
    break;
  }
  }

  // SP -= R4: apply the byte adjustment that __chkstk returned.
  BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP)
      .addReg(ARM::SP, RegState::Kill)
      .addReg(ARM::R4, RegState::Kill)
      .setMIFlags(MachineInstr::FrameSetup)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());

  MI.eraseFromParent();
  return MBB;
}
9472 
/// Lower the WIN__DBZCHK pseudo: compare the register in operand 0 against
/// zero and, if it is zero, branch to a new trap block that executes the
/// Windows divide-by-zero breakpoint (__brkdiv0); otherwise fall through to
/// the continuation block.  Returns the continuation block.
MachineBasicBlock *
ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();

  // Split MBB after MI: everything following MI continues in ContBB, which
  // inherits MBB's successors.
  MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
  MF->insert(++MBB->getIterator(), ContBB);
  ContBB->splice(ContBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  ContBB->transferSuccessorsAndUpdatePHIs(MBB);
  MBB->addSuccessor(ContBB);

  // The trap block holds only the __brkdiv0 breakpoint instruction.
  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0));
  MF->push_back(TrapBB);
  MBB->addSuccessor(TrapBB);

  // if (divisor == 0) goto TrapBB;
  BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
      .addReg(MI.getOperand(0).getReg())
      .addImm(0)
      .add(predOps(ARMCC::AL));
  BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::EQ)
      .addReg(ARM::CPSR);

  MI.eraseFromParent();
  return ContBB;
}
9504 
9505 // The CPSR operand of SelectItr might be missing a kill marker
9506 // because there were multiple uses of CPSR, and ISel didn't know
9507 // which to mark. Figure out whether SelectItr should have had a
9508 // kill marker, and set it if it should. Returns the correct kill
9509 // marker value.
9510 static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr,
9511                                    MachineBasicBlock* BB,
9512                                    const TargetRegisterInfo* TRI) {
9513   // Scan forward through BB for a use/def of CPSR.
9514   MachineBasicBlock::iterator miI(std::next(SelectItr));
9515   for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
9516     const MachineInstr& mi = *miI;
9517     if (mi.readsRegister(ARM::CPSR))
9518       return false;
9519     if (mi.definesRegister(ARM::CPSR))
9520       break; // Should have kill-flag - update below.
9521   }
9522 
9523   // If we hit the end of the block, check whether CPSR is live into a
9524   // successor.
9525   if (miI == BB->end()) {
9526     for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
9527                                           sEnd = BB->succ_end();
9528          sItr != sEnd; ++sItr) {
9529       MachineBasicBlock* succ = *sItr;
9530       if (succ->isLiveIn(ARM::CPSR))
9531         return false;
9532     }
9533   }
9534 
9535   // We found a def, or hit the end of the basic block and CPSR wasn't live
9536   // out. SelectMI should have a kill flag on CPSR.
9537   SelectItr->addRegisterKilled(ARM::CPSR, TRI);
9538   return true;
9539 }
9540 
/// Expand pseudo-instructions that were marked for custom insertion during
/// instruction selection.  Each case rewrites \p MI into real machine
/// instructions (possibly creating new basic blocks) and returns the block
/// where subsequent instructions should be inserted.
MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  bool isThumb2 = Subtarget->isThumb2();
  switch (MI.getOpcode()) {
  default: {
    MI.print(errs());
    llvm_unreachable("Unexpected instr type to insert");
  }

  // Thumb1 post-indexed loads are really just single-register LDMs.
  case ARM::tLDR_postidx: {
    MachineOperand Def(MI.getOperand(1));
    BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD))
        .add(Def)  // Rn_wb
        .add(MI.getOperand(2))  // Rn
        .add(MI.getOperand(3))  // PredImm
        .add(MI.getOperand(4))  // PredReg
        .add(MI.getOperand(0))  // Rt
        .cloneMemRefs(MI);
    MI.eraseFromParent();
    return BB;
  }

  // The Thumb2 pre-indexed stores have the same MI operands, they just
  // define them differently in the .td files from the isel patterns, so
  // they need pseudos.
  case ARM::t2STR_preidx:
    MI.setDesc(TII->get(ARM::t2STR_PRE));
    return BB;
  case ARM::t2STRB_preidx:
    MI.setDesc(TII->get(ARM::t2STRB_PRE));
    return BB;
  case ARM::t2STRH_preidx:
    MI.setDesc(TII->get(ARM::t2STRH_PRE));
    return BB;

  case ARM::STRi_preidx:
  case ARM::STRBi_preidx: {
    unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
                                                         : ARM::STRB_PRE_IMM;
    // Decode the offset: the AM2 immediate encodes both the magnitude and
    // the add/sub direction; re-express it as a signed offset.
    unsigned Offset = MI.getOperand(4).getImm();
    bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
    Offset = ARM_AM::getAM2Offset(Offset);
    if (isSub)
      Offset = -Offset;

    MachineMemOperand *MMO = *MI.memoperands_begin();
    BuildMI(*BB, MI, dl, TII->get(NewOpc))
        .add(MI.getOperand(0)) // Rn_wb
        .add(MI.getOperand(1)) // Rt
        .add(MI.getOperand(2)) // Rn
        .addImm(Offset)        // offset (skip GPR==zero_reg)
        .add(MI.getOperand(5)) // pred
        .add(MI.getOperand(6))
        .addMemOperand(MMO);
    MI.eraseFromParent();
    return BB;
  }
  case ARM::STRr_preidx:
  case ARM::STRBr_preidx:
  case ARM::STRH_preidx: {
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
    case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
    case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
    }
    // The register forms keep the operand list unchanged; just re-emit
    // with the real opcode.
    MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
    for (unsigned i = 0; i < MI.getNumOperands(); ++i)
      MIB.add(MI.getOperand(i));
    MI.eraseFromParent();
    return BB;
  }

  case ARM::tMOVCCr_pseudo: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator It = ++BB->getIterator();

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB  = BB;
    MachineFunction *F = BB->getParent();
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Check whether CPSR is live past the tMOVCCr_pseudo.
    // If it is, it must be live into both new blocks as well.
    const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
    if (!MI.killsRegister(ARM::CPSR) &&
        !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) {
      copy0MBB->addLiveIn(ARM::CPSR);
      sinkMBB->addLiveIn(ARM::CPSR);
    }

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    // Conditional branch on the pseudo's condition (operands 3/4).
    BuildMI(BB, dl, TII->get(ARM::tBcc))
        .addMBB(sinkMBB)
        .addImm(MI.getOperand(3).getImm())
        .addReg(MI.getOperand(4).getReg());

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
        .addReg(MI.getOperand(1).getReg())
        .addMBB(copy0MBB)
        .addReg(MI.getOperand(2).getReg())
        .addMBB(thisMBB);

    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }

  case ARM::BCCi64:
  case ARM::BCCZi64: {
    // If there is an unconditional branch to the other successor, remove it.
    BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());

    // Compare both parts that make up the double comparison separately for
    // equality.
    bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;

    unsigned LHS1 = MI.getOperand(1).getReg();
    unsigned LHS2 = MI.getOperand(2).getReg();
    if (RHSisZero) {
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
          .addReg(LHS1)
          .addImm(0)
          .add(predOps(ARMCC::AL));
      // The second compare is predicated on EQ, so it only executes (and
      // updates CPSR) when the first halves compared equal.
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
        .addReg(LHS2).addImm(0)
        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
    } else {
      unsigned RHS1 = MI.getOperand(3).getReg();
      unsigned RHS2 = MI.getOperand(4).getReg();
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
          .addReg(LHS1)
          .addReg(RHS1)
          .add(predOps(ARMCC::AL));
      // As above: predicated on EQ so CPSR reflects both half-compares.
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
        .addReg(LHS2).addReg(RHS2)
        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
    }

    MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
    MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
    if (MI.getOperand(0).getImm() == ARMCC::NE)
      std::swap(destMBB, exitMBB);

    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
      .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
    if (isThumb2)
      BuildMI(BB, dl, TII->get(ARM::t2B))
          .addMBB(exitMBB)
          .add(predOps(ARMCC::AL));
    else
      BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB);

    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }

  // The setjmp pseudos are expanded elsewhere; nothing to insert here.
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return BB;

  case ARM::Int_eh_sjlj_setup_dispatch:
    EmitSjLjDispatchBlock(MI, BB);
    return BB;

  case ARM::ABS:
  case ARM::t2ABS: {
    // To insert an ABS instruction, we have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // source vreg to test against 0, the destination vreg to set,
    // the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    // It transforms
    //     V1 = ABS V0
    // into
    //     V2 = MOVS V0
    //     BCC                      (branch to SinkBB if V0 >= 0)
    //     RSBBB: V3 = RSBri V2, 0  (compute ABS if V2 < 0)
    //     SinkBB: V1 = PHI(V2, V3)
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator BBI = ++BB->getIterator();
    MachineFunction *Fn = BB->getParent();
    MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *SinkBB  = Fn->CreateMachineBasicBlock(LLVM_BB);
    Fn->insert(BBI, RSBBB);
    Fn->insert(BBI, SinkBB);

    unsigned int ABSSrcReg = MI.getOperand(1).getReg();
    unsigned int ABSDstReg = MI.getOperand(0).getReg();
    bool ABSSrcKIll = MI.getOperand(1).isKill();
    bool isThumb2 = Subtarget->isThumb2();
    MachineRegisterInfo &MRI = Fn->getRegInfo();
    // In Thumb mode S must not be specified if source register is the SP or
    // PC and if destination register is the SP, so restrict register class
    unsigned NewRsbDstReg =
      MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    SinkBB->splice(SinkBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
    SinkBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(RSBBB);
    BB->addSuccessor(SinkBB);

    // fall through to SinkMBB
    RSBBB->addSuccessor(SinkBB);

    // insert a cmp at the end of BB
    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
        .addReg(ABSSrcReg)
        .addImm(0)
        .add(predOps(ARMCC::AL));

    // insert a bcc with opposite CC to ARMCC::MI at the end of BB
    BuildMI(BB, dl,
      TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
      .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);

    // insert rsbri in RSBBB
    // Note: BCC and rsbri will be converted into predicated rsbmi
    // by if-conversion pass
    BuildMI(*RSBBB, RSBBB->begin(), dl,
            TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
        .addReg(ABSSrcReg, ABSSrcKIll ? RegState::Kill : 0)
        .addImm(0)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    // insert PHI in SinkBB,
    // reuse ABSDstReg to not change uses of ABS instruction
    BuildMI(*SinkBB, SinkBB->begin(), dl,
      TII->get(ARM::PHI), ABSDstReg)
      .addReg(NewRsbDstReg).addMBB(RSBBB)
      .addReg(ABSSrcReg).addMBB(BB);

    // remove ABS instruction
    MI.eraseFromParent();

    // return last added BB
    return SinkBB;
  }
  case ARM::COPY_STRUCT_BYVAL_I32:
    ++NumLoopByVals;
    return EmitStructByval(MI, BB);
  case ARM::WIN__CHKSTK:
    return EmitLowered__chkstk(MI, BB);
  case ARM::WIN__DBZCHK:
    return EmitLowered__dbzchk(MI, BB);
  }
}
9830 
9831 /// Attaches vregs to MEMCPY that it will use as scratch registers
9832 /// when it is expanded into LDM/STM. This is done as a post-isel lowering
9833 /// instead of as a custom inserter because we need the use list from the SDNode.
9834 static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
9835                                     MachineInstr &MI, const SDNode *Node) {
9836   bool isThumb1 = Subtarget->isThumb1Only();
9837 
9838   DebugLoc DL = MI.getDebugLoc();
9839   MachineFunction *MF = MI.getParent()->getParent();
9840   MachineRegisterInfo &MRI = MF->getRegInfo();
9841   MachineInstrBuilder MIB(*MF, MI);
9842 
9843   // If the new dst/src is unused mark it as dead.
9844   if (!Node->hasAnyUseOfValue(0)) {
9845     MI.getOperand(0).setIsDead(true);
9846   }
9847   if (!Node->hasAnyUseOfValue(1)) {
9848     MI.getOperand(1).setIsDead(true);
9849   }
9850 
9851   // The MEMCPY both defines and kills the scratch registers.
9852   for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
9853     unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
9854                                                          : &ARM::GPRRegClass);
9855     MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
9856   }
9857 }
9858 
/// Post-isel fixups for instructions whose MachineInstr form differs from
/// what isel produced.  MEMCPY gets its scratch registers attached; the
/// flag-setting add/sub pseudos have their implicit CPSR def folded into
/// the trailing optional cc_out operand.
void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                      SDNode *Node) const {
  if (MI.getOpcode() == ARM::MEMCPY) {
    attachMEMCPYScratchRegs(Subtarget, MI, Node);
    return;
  }

  const MCInstrDesc *MCID = &MI.getDesc();
  // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
  // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
  // operand is still set to noreg. If needed, set the optional operand's
  // register to CPSR, and remove the redundant implicit def.
  //
  // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR).

  // Rename pseudo opcodes.
  unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
  unsigned ccOutIdx;
  if (NewOpc) {
    const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
    MCID = &TII->get(NewOpc);

    assert(MCID->getNumOperands() ==
           MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
        && "converted opcode should be the same except for cc_out"
           " (and, on Thumb1, pred)");

    MI.setDesc(*MCID);

    // Add the optional cc_out operand
    MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));

    // On Thumb1, move all input operands to the end, then add the predicate
    if (Subtarget->isThumb1Only()) {
      // Rotate each input (always at index 1) to the back; the count excludes
      // the def, the cc_out just appended, and the two predicate operands
      // added below, hence "- 4".
      for (unsigned c = MCID->getNumOperands() - 4; c--;) {
        MI.addOperand(MI.getOperand(1));
        MI.RemoveOperand(1);
      }

      // Restore the ties
      for (unsigned i = MI.getNumOperands(); i--;) {
        const MachineOperand& op = MI.getOperand(i);
        if (op.isReg() && op.isUse()) {
          int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
          if (DefIdx != -1)
            MI.tieOperands(DefIdx, i);
        }
      }

      // Append the predicate: AL condition plus a null predicate register.
      MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
      MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
      ccOutIdx = 1;
    } else
      ccOutIdx = MCID->getNumOperands() - 1;
  } else
    ccOutIdx = MCID->getNumOperands() - 1;

  // Any ARM instruction that sets the 's' bit should specify an optional
  // "cc_out" operand in the last operand position.
  if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
  // since we already have an optional CPSR def.
  bool definesCPSR = false;
  bool deadCPSR = false;
  for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
       ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
      definesCPSR = true;
      if (MO.isDead())
        deadCPSR = true;
      MI.RemoveOperand(i);
      break;
    }
  }
  if (!definesCPSR) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
  if (deadCPSR) {
    assert(!MI.getOperand(ccOutIdx).getReg() &&
           "expect uninitialized optional cc_out operand");
    // Thumb1 instructions must have the S bit even if the CPSR is dead.
    if (!Subtarget->isThumb1Only())
      return;
  }

  // If this instruction was defined with an optional CPSR def and its dag node
  // had a live implicit CPSR def, then activate the optional CPSR def.
  MachineOperand &MO = MI.getOperand(ccOutIdx);
  MO.setReg(ARM::CPSR);
  MO.setIsDef(true);
}
9956 
9957 //===----------------------------------------------------------------------===//
9958 //                           ARM Optimization Hooks
9959 //===----------------------------------------------------------------------===//
9960 
9961 // Helper function that checks if N is a null or all ones constant.
9962 static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
9963   return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
9964 }
9965 
// Return true if N is conditionally 0 or all ones.
// Detects these expressions where cc is an i1 value:
//
//   (select cc 0, y)   [AllOnes=0]
//   (select cc y, 0)   [AllOnes=0]
//   (zext cc)          [AllOnes=0]
//   (sext cc)          [AllOnes=0/1]
//   (select cc -1, y)  [AllOnes=1]
//   (select cc y, -1)  [AllOnes=1]
//
// Invert is set when N is the null/all ones constant when CC is false.
// OtherOp is set to the alternative value of N.
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
                                       SDValue &CC, bool &Invert,
                                       SDValue &OtherOp,
                                       SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: return false;
  case ISD::SELECT: {
    CC = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    // The identity constant may sit in either select arm; Invert records
    // which one, so callers know the sense of CC.
    if (isZeroOrAllOnes(N1, AllOnes)) {
      Invert = false;
      OtherOp = N2;
      return true;
    }
    if (isZeroOrAllOnes(N2, AllOnes)) {
      Invert = true;
      OtherOp = N1;
      return true;
    }
    return false;
  }
  case ISD::ZERO_EXTEND:
    // (zext cc) can never be the all ones value.
    if (AllOnes)
      return false;
    LLVM_FALLTHROUGH;
  case ISD::SIGN_EXTEND: {
    SDLoc dl(N);
    EVT VT = N->getValueType(0);
    CC = N->getOperand(0);
    // Only handle the extension of an i1 SETCC; anything wider is not a
    // boolean condition we can turn into a select.
    if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC)
      return false;
    Invert = !AllOnes;
    if (AllOnes)
      // When looking for an AllOnes constant, N is an sext, and the 'other'
      // value is 0.
      OtherOp = DAG.getConstant(0, dl, VT);
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      // When looking for a 0 constant, N can be zext or sext.
      OtherOp = DAG.getConstant(1, dl, VT);
    else
      // sext of i1: the non-zero result is all ones.
      OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
                                VT);
    return true;
  }
  }
}
10026 
// Combine a constant select operand into its use:
//
//   (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
//   (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
//   (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
//   (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
//   (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
//
// The transform is rejected if the select doesn't have a constant operand that
// is null, or all ones when AllOnes is set.
//
// Also recognize sext/zext from i1:
//
//   (add (zext cc), x) -> (select cc (add x, 1), x)
//   (add (sext cc), x) -> (select cc (add x, -1), x)
//
// These transformations eventually create predicated instructions.
//
// @param N       The node to transform.
// @param Slct    The N operand that is a select.
// @param OtherOp The other N operand (x above).
// @param DCI     Context.
// @param AllOnes Require the select constant to be all ones instead of null.
// @returns The new node, or SDValue() on failure.
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI,
                            bool AllOnes = false) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue NonConstantVal;
  SDValue CCOp;
  bool SwapSelectOps;
  if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
                                  NonConstantVal, DAG))
    return SDValue();

  // Slct is now known to be the desired identity constant when CC is true,
  // so the select folds to OtherOp for that arm and to (N op NonConstantVal)
  // for the other.
  SDValue TrueVal = OtherOp;
  SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
                                 OtherOp, NonConstantVal);
  // Unless SwapSelectOps says CC should be false.
  if (SwapSelectOps)
    std::swap(TrueVal, FalseVal);

  return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
                     CCOp, TrueVal, FalseVal);
}
10075 
10076 // Attempt combineSelectAndUse on each operand of a commutative operator N.
10077 static
10078 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
10079                                        TargetLowering::DAGCombinerInfo &DCI) {
10080   SDValue N0 = N->getOperand(0);
10081   SDValue N1 = N->getOperand(1);
10082   if (N0.getNode()->hasOneUse())
10083     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
10084       return Result;
10085   if (N1.getNode()->hasOneUse())
10086     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
10087       return Result;
10088   return SDValue();
10089 }
10090 
10091 static bool IsVUZPShuffleNode(SDNode *N) {
10092   // VUZP shuffle node.
10093   if (N->getOpcode() == ARMISD::VUZP)
10094     return true;
10095 
10096   // "VUZP" on i32 is an alias for VTRN.
10097   if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
10098     return true;
10099 
10100   return false;
10101 }
10102 
10103 static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
10104                                  TargetLowering::DAGCombinerInfo &DCI,
10105                                  const ARMSubtarget *Subtarget) {
10106   // Look for ADD(VUZP.0, VUZP.1).
10107   if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
10108       N0 == N1)
10109    return SDValue();
10110 
10111   // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
10112   if (!N->getValueType(0).is64BitVector())
10113     return SDValue();
10114 
10115   // Generate vpadd.
10116   SelectionDAG &DAG = DCI.DAG;
10117   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10118   SDLoc dl(N);
10119   SDNode *Unzip = N0.getNode();
10120   EVT VT = N->getValueType(0);
10121 
10122   SmallVector<SDValue, 8> Ops;
10123   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
10124                                 TLI.getPointerTy(DAG.getDataLayout())));
10125   Ops.push_back(Unzip->getOperand(0));
10126   Ops.push_back(Unzip->getOperand(1));
10127 
10128   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
10129 }
10130 
// Fold ADD(SEXT(VUZP.0), SEXT(VUZP.1)) — or the matching ZEXT form — into a
// vpaddl.sN / vpaddl.uN intrinsic on the concatenation of the unzip's
// inputs. Returns SDValue() if the pattern does not match.
static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  // Check for two extended operands. Both extensions must have the same
  // signedness, since it selects between vpaddls and vpaddlu below.
  if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
        N1.getOpcode() == ISD::SIGN_EXTEND) &&
      !(N0.getOpcode() == ISD::ZERO_EXTEND &&
        N1.getOpcode() == ISD::ZERO_EXTEND))
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N10 = N1.getOperand(0);

  // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1)): both extensions must come from
  // distinct results of the same unzip node.
  if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
      N00 == N10)
    return SDValue();

  // We only recognize Q register paddl here; this can't be reached until
  // after type legalization.
  if (!N00.getValueType().is64BitVector() ||
      !N0.getValueType().is128BitVector())
    return SDValue();

  // Generate vpaddl.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  SmallVector<SDValue, 8> Ops;
  // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
  unsigned Opcode;
  if (N0.getOpcode() == ISD::SIGN_EXTEND)
    Opcode = Intrinsic::arm_neon_vpaddls;
  else
    Opcode = Intrinsic::arm_neon_vpaddlu;
  Ops.push_back(DAG.getConstant(Opcode, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));
  // Rebuild the full-width input vector by concatenating the unzip's two
  // operands; vpaddl then pairwise-adds across it.
  EVT ElemTy = N00.getValueType().getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
                               N00.getOperand(0), N00.getOperand(1));
  Ops.push_back(Concat);

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
}
10179 
// FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
// an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
// much easier to match.
static SDValue
AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {
  // Only perform optimization if after legalize, and if NEON is available. We
  // also expected both operands to be BUILD_VECTORs.
  if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
      || N0.getOpcode() != ISD::BUILD_VECTOR
      || N1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // Check output type since VPADDL operand elements can only be 8, 16, or 32.
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Check that the vector operands are of the right form.
  // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
  // operands, where N is the size of the formed vector.
  // Each EXTRACT_VECTOR should have the same input vector and odd or even
  // index such that we have a pair wise add pattern.

  // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
  if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Vec = N0->getOperand(0)->getOperand(0);
  SDNode *V = Vec.getNode();
  unsigned nextIndex = 0;

  // For each operands to the ADD which are BUILD_VECTORs,
  // check to see if each of their operands are an EXTRACT_VECTOR with
  // the same vector and appropriate index.
  for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
    if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
        && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

      SDValue ExtVec0 = N0->getOperand(i);
      SDValue ExtVec1 = N1->getOperand(i);

      // First operand is the vector, verify its the same.
      if (V != ExtVec0->getOperand(0).getNode() ||
          V != ExtVec1->getOperand(0).getNode())
        return SDValue();

      // Second is the constant, verify its correct.
      ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
      ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));

      // For the constant, we want to see all the even or all the odd.
      // Lane i of N0 must read element 2*i and lane i of N1 element 2*i+1,
      // i.e. consecutive even/odd pairs — exactly the vpaddl access pattern.
      if (!C0 || !C1 || C0->getZExtValue() != nextIndex
          || C1->getZExtValue() != nextIndex+1)
        return SDValue();

      // Increment index.
      nextIndex+=2;
    } else
      return SDValue();
  }

  // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure
  // we're using the entire input vector, otherwise there's a size/legality
  // mismatch somewhere.
  if (nextIndex != Vec.getValueType().getVectorNumElements() ||
      Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
    return SDValue();

  // Create VPADDL node.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  SDLoc dl(N);

  // Build operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));

  // Input is the vector.
  Ops.push_back(Vec);

  // Get widened type and narrowed type.
  MVT widenType;
  unsigned numElem = VT.getVectorNumElements();

  EVT inputLaneType = Vec.getValueType().getVectorElementType();
  switch (inputLaneType.getSimpleVT().SimpleTy) {
    case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
    case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
    case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
    default:
      llvm_unreachable("Invalid vector element type for padd optimization.");
  }

  // vpaddl produces the doubled element width; extend or truncate the result
  // back to the type the original ADD produced.
  SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
  unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
  return DAG.getNode(ExtOp, dl, VT, tmp);
}
10280 
10281 static SDValue findMUL_LOHI(SDValue V) {
10282   if (V->getOpcode() == ISD::UMUL_LOHI ||
10283       V->getOpcode() == ISD::SMUL_LOHI)
10284     return V;
10285   return SDValue();
10286 }
10287 
// Try to combine (addc (adde (mul 16bit, 16bit), lo), hi) into one of the
// SMLALxy instructions, which multiply two 16-bit values and accumulate into
// a 64-bit value. Requires DSP on Thumb, or ARMv5TE+ otherwise. Returns the
// original AddcNode on success (to stop further combining), SDValue() on
// failure.
static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const ARMSubtarget *Subtarget) {
  if (Subtarget->isThumb()) {
    if (!Subtarget->hasDSP())
      return SDValue();
  } else if (!Subtarget->hasV5TEOps())
    return SDValue();

  // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
  // accumulates the product into a 64-bit value. The 16-bit values will
  // be sign extended somehow or SRA'd into 32-bit values
  // (addc (adde (mul 16bit, 16bit), lo), hi)
  // The MUL may appear as either operand of the ADDC.
  SDValue Mul = AddcNode->getOperand(0);
  SDValue Lo = AddcNode->getOperand(1);
  if (Mul.getOpcode() != ISD::MUL) {
    Lo = AddcNode->getOperand(0);
    Mul = AddcNode->getOperand(1);
    if (Mul.getOpcode() != ISD::MUL)
      return SDValue();
  }

  // Likewise the SRA (the sign-extended high half of the product) may appear
  // as either operand of the ADDE.
  SDValue SRA = AddeNode->getOperand(0);
  SDValue Hi = AddeNode->getOperand(1);
  if (SRA.getOpcode() != ISD::SRA) {
    SRA = AddeNode->getOperand(1);
    Hi = AddeNode->getOperand(0);
    if (SRA.getOpcode() != ISD::SRA)
      return SDValue();
  }
  // The shift must be by 31 (broadcast of the sign bit) and must shift the
  // same MUL the ADDC consumes.
  if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
    if (Const->getZExtValue() != 31)
      return SDValue();
  } else
    return SDValue();

  if (SRA.getOperand(0) != Mul)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(AddcNode);
  unsigned Opcode = 0;
  SDValue Op0;
  SDValue Op1;

  // Select the SMLALxy variant by whether each multiplicand is a
  // sign-extended low half (B) or an SRA'd high half (T).
  if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) {
    Opcode = ARMISD::SMLALBB;
    Op0 = Mul.getOperand(0);
    Op1 = Mul.getOperand(1);
  } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) {
    Opcode = ARMISD::SMLALBT;
    Op0 = Mul.getOperand(0);
    Op1 = Mul.getOperand(1).getOperand(0);
  } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) {
    Opcode = ARMISD::SMLALTB;
    Op0 = Mul.getOperand(0).getOperand(0);
    Op1 = Mul.getOperand(1);
  } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) {
    Opcode = ARMISD::SMLALTT;
    Op0 = Mul->getOperand(0).getOperand(0);
    Op1 = Mul->getOperand(1).getOperand(0);
  }

  if (!Op0 || !Op1)
    return SDValue();

  SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                              Op0, Op1, Lo, Hi);
  // Replace the ADDs' nodes uses by the MLA node's values.
  SDValue HiMLALResult(SMLAL.getNode(), 1);
  SDValue LoMLALResult(SMLAL.getNode(), 0);

  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);

  // Return original node to notify the driver to stop replacing.
  SDValue resNode(AddcNode, 0);
  return resNode;
}
10367 
static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // Look for multiply add opportunities.
  // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
  // each add nodes consumes a value from ISD::UMUL_LOHI and there is
  // a glue link from the first add to the second add.
  // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
  // a S/UMLAL instruction.
  //                  UMUL_LOHI
  //                 / :lo    \ :hi
  //                V          \          [no multiline comment]
  //    loAdd ->  ADDC         |
  //                 \ :carry /
  //                  V      V
  //                    ADDE   <- hiAdd
  //
  // In the special case where only the higher part of a signed result is used
  // and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts
  // a constant with the exact value of 0x80000000, we recognize we are dealing
  // with a "rounded multiply and add" (or subtract) and transform it into
  // either a ARMISD::SMMLAR or ARMISD::SMMLSR respectively.

  assert((AddeSubeNode->getOpcode() == ARMISD::ADDE ||
          AddeSubeNode->getOpcode() == ARMISD::SUBE) &&
         "Expect an ADDE or SUBE");

  assert(AddeSubeNode->getNumOperands() == 3 &&
         AddeSubeNode->getOperand(2).getValueType() == MVT::i32 &&
         "ADDE node has the wrong inputs");

  // Check that we are chained to the right ADDC or SUBC node. An ADDE must
  // consume an ADDC's carry, and a SUBE a SUBC's borrow.
  SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode();
  if ((AddeSubeNode->getOpcode() == ARMISD::ADDE &&
       AddcSubcNode->getOpcode() != ARMISD::ADDC) ||
      (AddeSubeNode->getOpcode() == ARMISD::SUBE &&
       AddcSubcNode->getOpcode() != ARMISD::SUBC))
    return SDValue();

  SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0);
  SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1);

  // Check if the two operands are from the same mul_lohi node.
  if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode())
    return SDValue();

  assert(AddcSubcNode->getNumValues() == 2 &&
         AddcSubcNode->getValueType(0) == MVT::i32 &&
         "Expect ADDC with two result values. First: i32");

  // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
  // maybe a SMLAL which multiplies two 16-bit values.
  if (AddeSubeNode->getOpcode() == ARMISD::ADDE &&
      AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI &&
      AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI &&
      AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI &&
      AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI)
    return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget);

  // Check for the triangle shape.
  SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0);
  SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1);

  // Make sure that the ADDE/SUBE operands are not coming from the same node.
  if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode())
    return SDValue();

  // Find the MUL_LOHI node walking up ADDE/SUBE's operands.
  bool IsLeftOperandMUL = false;
  SDValue MULOp = findMUL_LOHI(AddeSubeOp0);
  if (MULOp == SDValue())
    MULOp = findMUL_LOHI(AddeSubeOp1);
  else
    IsLeftOperandMUL = true;
  if (MULOp == SDValue())
    return SDValue();

  // Figure out the right opcode.
  unsigned Opc = MULOp->getOpcode();
  unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;

  // Figure out the high and low input values to the MLAL node.
  SDValue *HiAddSub = nullptr;
  SDValue *LoMul = nullptr;
  SDValue *LowAddSub = nullptr;

  // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI.
  if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1)))
    return SDValue();

  // The non-MUL operand of the ADDE/SUBE is the high accumulator input.
  if (IsLeftOperandMUL)
    HiAddSub = &AddeSubeOp1;
  else
    HiAddSub = &AddeSubeOp0;

  // Ensure that LoMul and LowAddSub are taken from correct ISD::SMUL_LOHI node
  // whose low result is fed to the ADDC/SUBC we are checking.

  if (AddcSubcOp0 == MULOp.getValue(0)) {
    LoMul = &AddcSubcOp0;
    LowAddSub = &AddcSubcOp1;
  }
  if (AddcSubcOp1 == MULOp.getValue(0)) {
    LoMul = &AddcSubcOp1;
    LowAddSub = &AddcSubcOp0;
  }

  if (!LoMul)
    return SDValue();

  // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC
  // the replacement below will create a cycle.
  if (AddcSubcNode == HiAddSub->getNode() ||
      AddcSubcNode->isPredecessorOf(HiAddSub->getNode()))
    return SDValue();

  // Create the merged node.
  SelectionDAG &DAG = DCI.DAG;

  // Start building operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(LoMul->getOperand(0));
  Ops.push_back(LoMul->getOperand(1));

  // Check whether we can use SMMLAR, SMMLSR or SMMULR instead.  For this to be
  // the case, we must be doing signed multiplication and only use the higher
  // part of the result of the MLAL, furthermore the LowAddSub must be a constant
  // addition or subtraction with the value of 0x80000000.
  if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() &&
      FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) &&
      LowAddSub->getNode()->getOpcode() == ISD::Constant &&
      static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() ==
          0x80000000) {
    Ops.push_back(*HiAddSub);
    if (AddcSubcNode->getOpcode() == ARMISD::SUBC) {
      FinalOpc = ARMISD::SMMLSR;
    } else {
      FinalOpc = ARMISD::SMMLAR;
    }
    SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops);
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode);

    return SDValue(AddeSubeNode, 0);
  } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC)
    // SMMLS is generated during instruction selection and the rest of this
    // function can not handle the case where AddcSubcNode is a SUBC.
    return SDValue();

  // Finish building the operand list for {U/S}MLAL
  Ops.push_back(*LowAddSub);
  Ops.push_back(*HiAddSub);

  SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32), Ops);

  // Replace the ADDs' nodes uses by the MLA node's values.
  SDValue HiMLALResult(MLALNode.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult);

  SDValue LoMLALResult(MLALNode.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult);

  // Return original node to notify the driver to stop replacing.
  return SDValue(AddeSubeNode, 0);
}
10533 
static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  // UMAAL is similar to UMLAL except that it adds two unsigned values.
  // While trying to combine for the other MLAL nodes, first search for the
  // chance to use UMAAL. Check if Addc uses a node which has already
  // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
  // as the addend, and it's handled in PerformUMLALCombine.

  // UMAAL needs ARMv6 + DSP; without it, fall back to the generic MLAL combine.
  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);

  // Check that we have a glued ADDC node.
  SDNode* AddcNode = AddeNode->getOperand(2).getNode();
  if (AddcNode->getOpcode() != ARMISD::ADDC)
    return SDValue();

  // Find the converted UMAAL or quit if it doesn't exist. The UMLAL may be
  // either operand of the ADDC; the other operand is the extra high addend.
  SDNode *UmlalNode = nullptr;
  SDValue AddHi;
  if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
    UmlalNode = AddcNode->getOperand(0).getNode();
    AddHi = AddcNode->getOperand(1);
  } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
    UmlalNode = AddcNode->getOperand(1).getNode();
    AddHi = AddcNode->getOperand(0);
  } else {
    return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
  }

  // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
  // the ADDC as well as Zero.
  if (!isNullConstant(UmlalNode->getOperand(3)))
    return SDValue();

  // The ADDE must add the UMLAL's high result to zero (in either operand
  // order); it then just propagates the carry from the low addition.
  if ((isNullConstant(AddeNode->getOperand(0)) &&
       AddeNode->getOperand(1).getNode() == UmlalNode) ||
      (AddeNode->getOperand(0).getNode() == UmlalNode &&
       isNullConstant(AddeNode->getOperand(1)))) {
    SelectionDAG &DAG = DCI.DAG;
    SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
                      UmlalNode->getOperand(2), AddHi };
    SDValue UMAAL =  DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32), Ops);

    // Replace the ADDs' nodes uses by the UMAAL node's values.
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));

    // Return original node to notify the driver to stop replacing.
    return SDValue(AddeNode, 0);
  }
  return SDValue();
}
10588 
10589 static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
10590                                    const ARMSubtarget *Subtarget) {
10591   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
10592     return SDValue();
10593 
10594   // Check that we have a pair of ADDC and ADDE as operands.
10595   // Both addends of the ADDE must be zero.
10596   SDNode* AddcNode = N->getOperand(2).getNode();
10597   SDNode* AddeNode = N->getOperand(3).getNode();
10598   if ((AddcNode->getOpcode() == ARMISD::ADDC) &&
10599       (AddeNode->getOpcode() == ARMISD::ADDE) &&
10600       isNullConstant(AddeNode->getOperand(0)) &&
10601       isNullConstant(AddeNode->getOperand(1)) &&
10602       (AddeNode->getOperand(2).getNode() == AddcNode))
10603     return DAG.getNode(ARMISD::UMAAL, SDLoc(N),
10604                        DAG.getVTList(MVT::i32, MVT::i32),
10605                        {N->getOperand(0), N->getOperand(1),
10606                         AddcNode->getOperand(0), AddcNode->getOperand(1)});
10607   else
10608     return SDValue();
10609 }
10610 
static SDValue PerformAddcSubcCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG(DCI.DAG);

  if (N->getOpcode() == ARMISD::SUBC) {
    // (SUBC (ADDE 0, 0, C), 1) -> C
    // The ADDE materializes the carry flag C as 0 + 0 + C; subtracting 1
    // from that regenerates the flag, so the SUBC's carry result can be
    // replaced by C directly.
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    if (LHS->getOpcode() == ARMISD::ADDE &&
        isNullConstant(LHS->getOperand(0)) &&
        isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) {
      return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2));
    }
  }

  if (Subtarget->isThumb1Only()) {
    // On Thumb1, flip ADDC of a negative constant to SUBC of its negation
    // (and vice versa) so the immediate becomes positive. Presumably this
    // enables a cheaper immediate encoding — NOTE(review): rationale
    // inferred from the transform, confirm against the Thumb1 ISA.
    SDValue RHS = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      int32_t imm = C->getSExtValue();
      // Exclude INT_MIN: negating it would overflow a 32-bit signed value.
      if (imm < 0 && imm > std::numeric_limits<int>::min()) {
        SDLoc DL(N);
        RHS = DAG.getConstant(-imm, DL, MVT::i32);
        unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
                                                           : ARMISD::ADDC;
        return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
      }
    }
  }

  return SDValue();
}
10643 
static SDValue PerformAddeSubeCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  if (Subtarget->isThumb1Only()) {
    // As in PerformAddcSubcCombine, flip ADDE of a negative constant to
    // SUBE (and vice versa) so the immediate becomes positive.
    SelectionDAG &DAG = DCI.DAG;
    SDValue RHS = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      int64_t imm = C->getSExtValue();
      if (imm < 0) {
        SDLoc DL(N);

        // The with-carry-in form matches bitwise not instead of the negation.
        // Effectively, the inverse interpretation of the carry flag already
        // accounts for part of the negation.
        RHS = DAG.getConstant(~imm, DL, MVT::i32);

        unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
                                                           : ARMISD::ADDE;
        return DAG.getNode(Opcode, DL, N->getVTList(),
                           N->getOperand(0), RHS, N->getOperand(2));
      }
    }
  } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) {
    // Non-Thumb1: try folding ADDE/SUBE of a SMUL_LOHI into an MLAL.
    return AddCombineTo64bitMLAL(N, DCI, Subtarget);
  }
  return SDValue();
}
10671 
10672 static SDValue PerformABSCombine(SDNode *N,
10673                                   TargetLowering::DAGCombinerInfo &DCI,
10674                                   const ARMSubtarget *Subtarget) {
10675   SDValue res;
10676   SelectionDAG &DAG = DCI.DAG;
10677   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10678 
10679   if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0)))
10680     return SDValue();
10681 
10682   if (!TLI.expandABS(N, res, DAG))
10683       return SDValue();
10684 
10685   return res;
10686 }
10687 
10688 /// PerformADDECombine - Target-specific dag combine transform from
10689 /// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or
10690 /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL
10691 static SDValue PerformADDECombine(SDNode *N,
10692                                   TargetLowering::DAGCombinerInfo &DCI,
10693                                   const ARMSubtarget *Subtarget) {
10694   // Only ARM and Thumb2 support UMLAL/SMLAL.
10695   if (Subtarget->isThumb1Only())
10696     return PerformAddeSubeCombine(N, DCI, Subtarget);
10697 
10698   // Only perform the checks after legalize when the pattern is available.
10699   if (DCI.isBeforeLegalize()) return SDValue();
10700 
10701   return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
10702 }
10703 
10704 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
10705 /// operands N0 and N1.  This is a helper for PerformADDCombine that is
10706 /// called with the default operands, and if that fails, with commuted
10707 /// operands.
10708 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
10709                                           TargetLowering::DAGCombinerInfo &DCI,
10710                                           const ARMSubtarget *Subtarget){
10711   // Attempt to create vpadd for this add.
10712   if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
10713     return Result;
10714 
10715   // Attempt to create vpaddl for this add.
10716   if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
10717     return Result;
10718   if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
10719                                                       Subtarget))
10720     return Result;
10721 
10722   // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
10723   if (N0.getNode()->hasOneUse())
10724     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
10725       return Result;
10726   return SDValue();
10727 }
10728 
10729 bool
10730 ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
10731                                                  CombineLevel Level) const {
10732   if (Level == BeforeLegalizeTypes)
10733     return true;
10734 
10735   if (N->getOpcode() != ISD::SHL)
10736     return true;
10737 
10738   if (Subtarget->isThumb1Only()) {
10739     // Avoid making expensive immediates by commuting shifts. (This logic
10740     // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted
10741     // for free.)
10742     if (N->getOpcode() != ISD::SHL)
10743       return true;
10744     SDValue N1 = N->getOperand(0);
10745     if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND &&
10746         N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR)
10747       return true;
10748     if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) {
10749       if (Const->getAPIntValue().ult(256))
10750         return false;
10751       if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) &&
10752           Const->getAPIntValue().sgt(-256))
10753         return false;
10754     }
10755     return true;
10756   }
10757 
10758   // Turn off commute-with-shift transform after legalization, so it doesn't
10759   // conflict with PerformSHLSimplify.  (We could try to detect when
10760   // PerformSHLSimplify would trigger more precisely, but it isn't
10761   // really necessary.)
10762   return false;
10763 }
10764 
10765 bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
10766     const SDNode *N, CombineLevel Level) const {
10767   if (!Subtarget->isThumb1Only())
10768     return true;
10769 
10770   if (Level == BeforeLegalizeTypes)
10771     return true;
10772 
10773   return false;
10774 }
10775 
10776 bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
10777   if (!Subtarget->hasNEON()) {
10778     if (Subtarget->isThumb1Only())
10779       return VT.getScalarSizeInBits() <= 32;
10780     return true;
10781   }
10782   return VT.isScalarInteger();
10783 }
10784 
/// PerformSHLSimplify - Undo the generic DAGCombiner fold
///   (shl (binop x, c1), c2) -> (binop (shl x, c2), c1 << c2)
/// when keeping the shift outermost lets the users of N fold it into a
/// shifted-operand form, and when both constants are cheap immediates.
static SDValue PerformSHLSimplify(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *ST) {
  // Allow the generic combiner to identify potential bswaps.
  if (DCI.isBeforeLegalize())
    return SDValue();

  // DAG combiner will fold:
  // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2
  // Other code patterns that can be also be modified have the following form:
  // b + ((a << 1) | 510)
  // b + ((a << 1) & 510)
  // b + ((a << 1) ^ 510)
  // b + ((a << 1) + 510)

  // Many instructions can perform the shift for free, but it requires both
  // the operands to be registers. If c1 << c2 is too large, a mov immediate
  // instruction will needed. So, unfold back to the original pattern if:
  // - if c1 and c2 are small enough that they don't require mov imms.
  // - the user(s) of the node can perform an shl

  // No shifted operands for 16-bit instructions.
  if (ST->isThumb() && ST->isThumb1Only())
    return SDValue();

  // Check that all the users could perform the shl themselves.
  for (auto U : N->uses()) {
    switch(U->getOpcode()) {
    default:
      return SDValue();
    case ISD::SUB:
    case ISD::ADD:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SETCC:
    case ARMISD::CMP:
      // Check that the user isn't already using a constant because there
      // aren't any instructions that support an immediate operand and a
      // shifted operand.
      if (isa<ConstantSDNode>(U->getOperand(0)) ||
          isa<ConstantSDNode>(U->getOperand(1)))
        return SDValue();

      // Check that it's not already using a shift.
      if (U->getOperand(0).getOpcode() == ISD::SHL ||
          U->getOperand(1).getOpcode() == ISD::SHL)
        return SDValue();
      break;
    }
  }

  // Only the commutative/comparison binops listed above are worth unfolding.
  if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR &&
      N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND)
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::SHL)
    return SDValue();

  SDValue SHL = N->getOperand(0);

  // N is (binop (shl x, C2), C1ShlC2), i.e. C1ShlC2 is the already-shifted
  // constant produced by the generic fold.
  auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
  if (!C1ShlC2 || !C2)
    return SDValue();

  APInt C2Int = C2->getAPIntValue();
  APInt C1Int = C1ShlC2->getAPIntValue();

  // Check that performing a lshr will not lose any information.
  APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(),
                                     C2Int.getBitWidth() - C2->getZExtValue());
  if ((C1Int & Mask) != C1Int)
    return SDValue();

  // Shift the first constant.
  C1Int.lshrInPlace(C2Int);

  // The immediates are encoded as an 8-bit value that can be rotated.
  // NOTE(review): this counts the span of significant bits; it approximates
  // the ARM modified-immediate encoding (which rotates by even amounts).
  auto LargeImm = [](const APInt &Imm) {
    unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
    return Imm.getBitWidth() - Zeros > 8;
  };

  if (LargeImm(C1Int) || LargeImm(C2Int))
    return SDValue();

  // Rebuild as (shl (binop x, C1Int), C2): the binop now uses the small
  // unshifted constant, and the shl can be folded by N's users.
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue X = SHL.getOperand(0);
  SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X,
                              DAG.getConstant(C1Int, dl, MVT::i32));
  // Shift left to compensate for the lshr of C1Int.
  SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1));

  LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
             SHL.dump(); N->dump());
  LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
  return Res;
}
10886 
10887 
10888 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
10889 ///
10890 static SDValue PerformADDCombine(SDNode *N,
10891                                  TargetLowering::DAGCombinerInfo &DCI,
10892                                  const ARMSubtarget *Subtarget) {
10893   SDValue N0 = N->getOperand(0);
10894   SDValue N1 = N->getOperand(1);
10895 
10896   // Only works one way, because it needs an immediate operand.
10897   if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
10898     return Result;
10899 
10900   // First try with the default operand order.
10901   if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
10902     return Result;
10903 
10904   // If that didn't work, try again with the operands commuted.
10905   return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
10906 }
10907 
10908 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
10909 ///
10910 static SDValue PerformSUBCombine(SDNode *N,
10911                                  TargetLowering::DAGCombinerInfo &DCI) {
10912   SDValue N0 = N->getOperand(0);
10913   SDValue N1 = N->getOperand(1);
10914 
10915   // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
10916   if (N1.getNode()->hasOneUse())
10917     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
10918       return Result;
10919 
10920   return SDValue();
10921 }
10922 
10923 /// PerformVMULCombine
10924 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
10925 /// special multiplier accumulator forwarding.
10926 ///   vmul d3, d0, d2
10927 ///   vmla d3, d1, d2
10928 /// is faster than
10929 ///   vadd d3, d0, d1
10930 ///   vmul d3, d3, d2
10931 //  However, for (A + B) * (A + B),
10932 //    vadd d2, d0, d1
10933 //    vmul d3, d0, d2
10934 //    vmla d3, d1, d2
10935 //  is slower than
10936 //    vadd d2, d0, d1
10937 //    vmul d3, d2, d2
10938 static SDValue PerformVMULCombine(SDNode *N,
10939                                   TargetLowering::DAGCombinerInfo &DCI,
10940                                   const ARMSubtarget *Subtarget) {
10941   if (!Subtarget->hasVMLxForwarding())
10942     return SDValue();
10943 
10944   SelectionDAG &DAG = DCI.DAG;
10945   SDValue N0 = N->getOperand(0);
10946   SDValue N1 = N->getOperand(1);
10947   unsigned Opcode = N0.getOpcode();
10948   if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
10949       Opcode != ISD::FADD && Opcode != ISD::FSUB) {
10950     Opcode = N1.getOpcode();
10951     if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
10952         Opcode != ISD::FADD && Opcode != ISD::FSUB)
10953       return SDValue();
10954     std::swap(N0, N1);
10955   }
10956 
10957   if (N0 == N1)
10958     return SDValue();
10959 
10960   EVT VT = N->getValueType(0);
10961   SDLoc DL(N);
10962   SDValue N00 = N0->getOperand(0);
10963   SDValue N01 = N0->getOperand(1);
10964   return DAG.getNode(Opcode, DL, VT,
10965                      DAG.getNode(ISD::MUL, DL, VT, N00, N1),
10966                      DAG.getNode(ISD::MUL, DL, VT, N01, N1));
10967 }
10968 
/// PerformMULCombine - Target-specific dag combine for ISD::MUL: replace a
/// multiply by a constant of the form (+/-)(2^N +/- 1) * 2^M with a
/// shift/add or shift/sub sequence followed by a final shift by M.
static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;

  // Thumb1 is excluded from this expansion.
  if (Subtarget->isThumb1Only())
    return SDValue();

  // Let the generic combiner run first, and never run inside the legalizer.
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  // Vector multiplies get the VMLx-forwarding distribution instead.
  if (VT.is64BitVector() || VT.is128BitVector())
    return PerformVMULCombine(N, DCI, Subtarget);
  if (VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();

  int64_t MulAmt = C->getSExtValue();
  // Factor out the power-of-two part (2^M) of the multiplier; it becomes a
  // final left shift after the add/sub expansion below.
  unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);

  // Clamp into [0, 31] (countTrailingZeros can report up to 64).
  ShiftAmt = ShiftAmt & (32 - 1);
  SDValue V = N->getOperand(0);
  SDLoc DL(N);

  SDValue Res;
  MulAmt >>= ShiftAmt;

  if (MulAmt >= 0) {
    if (isPowerOf2_32(MulAmt - 1)) {
      // (mul x, 2^N + 1) => (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt - 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmt + 1)) {
      // (mul x, 2^N - 1) => (sub (shl x, N), x)
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt + 1), DL,
                                                    MVT::i32)),
                        V);
    } else
      return SDValue();
  } else {
    uint64_t MulAmtAbs = -MulAmt;
    if (isPowerOf2_32(MulAmtAbs + 1)) {
      // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmtAbs - 1)) {
      // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
                                                    MVT::i32)));
      // Negate via (sub 0, Res).
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getConstant(0, DL, MVT::i32), Res);
    } else
      return SDValue();
  }

  // Reapply the factored-out power of two.
  if (ShiftAmt != 0)
    Res = DAG.getNode(ISD::SHL, DL, VT,
                      Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));

  // Do not add new nodes to DAG combiner worklist.
  DCI.CombineTo(N, Res, false);
  return SDValue();
}
11051 
/// CombineANDShift - Rewrite "(and (shl/srl x, c2), c1)", where c1 is a
/// (possibly shifted) mask, as a pair of shifts so the mask constant need
/// not be materialized in a register.  Only invoked for Thumb1 (see the
/// caller, PerformANDCombine).
static SDValue CombineANDShift(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {
  // Allow DAGCombine to pattern-match before we touch the canonical form.
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  if (N->getValueType(0) != MVT::i32)
    return SDValue();

  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!N1C)
    return SDValue();

  uint32_t C1 = (uint32_t)N1C->getZExtValue();
  // Don't transform uxtb/uxth.
  if (C1 == 255 || C1 == 65535)
    return SDValue();

  // The inner shift must have no other users, or we'd duplicate work.
  SDNode *N0 = N->getOperand(0).getNode();
  if (!N0->hasOneUse())
    return SDValue();

  if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL)
    return SDValue();

  bool LeftShift = N0->getOpcode() == ISD::SHL;

  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  if (!N01C)
    return SDValue();

  uint32_t C2 = (uint32_t)N01C->getZExtValue();
  if (!C2 || C2 >= 32)
    return SDValue();

  // Clear irrelevant bits in the mask.
  // (Bits already zeroed by the inner shift can never be set, so drop
  // them from C1 before classifying its shape below.)
  if (LeftShift)
    C1 &= (-1U << C2);
  else
    C1 &= (-1U >> C2);

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  // We have a pattern of the form "(and (shl x, c2) c1)" or
  // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to
  // transform to a pair of shifts, to save materializing c1.

  // First pattern: right shift, then mask off leading bits.
  // FIXME: Use demanded bits?
  if (!LeftShift && isMask_32(C1)) {
    uint32_t C3 = countLeadingZeros(C1);
    if (C2 < C3) {
      SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C3 - C2, DL, MVT::i32));
      return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // First pattern, reversed: left shift, then mask off trailing bits.
  if (LeftShift && isMask_32(~C1)) {
    uint32_t C3 = countTrailingZeros(C1);
    if (C2 < C3) {
      SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C3 - C2, DL, MVT::i32));
      return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // Second pattern: left shift, then mask off leading bits.
  // FIXME: Use demanded bits?
  if (LeftShift && isShiftedMask_32(C1)) {
    uint32_t Trailing = countTrailingZeros(C1);
    uint32_t C3 = countLeadingZeros(C1);
    if (Trailing == C2 && C2 + C3 < 32) {
      SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C2 + C3, DL, MVT::i32));
      return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
                        DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // Second pattern, reversed: right shift, then mask off trailing bits.
  // FIXME: Handle other patterns of known/demanded bits.
  if (!LeftShift && isShiftedMask_32(C1)) {
    uint32_t Leading = countLeadingZeros(C1);
    uint32_t C3 = countTrailingZeros(C1);
    if (Leading == C2 && C2 + C3 < 32) {
      SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C2 + C3, DL, MVT::i32));
      return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // FIXME: Transform "(and (shl x, c2) c1)" ->
  // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than
  // c1.
  return SDValue();
}
11155 
/// PerformANDCombine - Target-specific dag combine xforms for ISD::AND:
/// vector AND with a constant splat becomes VBIC with the inverted
/// immediate; otherwise try select- and shl-based folds.
static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VBIC
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      EVT VbicVT;
      // AND with a splat is BIC with the complement; check whether
      // ~SplatBits is encodable as a NEON modified immediate.
      SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VbicVT, VT.is128BitVector(),
                                      OtherModImm);
      if (Val.getNode()) {
        // Bitcast into the type the immediate was encoded for, emit the
        // VBICIMM, then bitcast back to the original type.
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
        SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
      return Result;

    if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
      return Result;
  }

  // Thumb1 only: rewrite and-of-shift as a cheaper pair of shifts.
  if (Subtarget->isThumb1Only())
    if (SDValue Result = CombineANDShift(N, DCI, Subtarget))
      return Result;

  return SDValue();
}
11203 
// Try combining OR nodes to SMULWB, SMULWT.
// Matches (or (srl (smul_lohi x, y), 16), (shl (smul_lohi x, y), 16)),
// i.e. the middle 32 bits of the 64-bit product, where one multiplicand
// is (or derives from) a sign-extended 16-bit value.
static SDValue PerformORCombineToSMULWBT(SDNode *OR,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         const ARMSubtarget *Subtarget) {
  // SMULW[BT] needs ARMv6, and in Thumb mode additionally Thumb2 + DSP.
  if (!Subtarget->hasV6Ops() ||
      (Subtarget->isThumb() &&
       (!Subtarget->hasThumb2() || !Subtarget->hasDSP())))
    return SDValue();

  SDValue SRL = OR->getOperand(0);
  SDValue SHL = OR->getOperand(1);

  // The srl/shl pair may appear in either operand order.
  if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) {
    SRL = OR->getOperand(1);
    SHL = OR->getOperand(0);
  }
  if (!isSRL16(SRL) || !isSHL16(SHL))
    return SDValue();

  // The first operands to the shifts need to be the two results from the
  // same smul_lohi node.
  if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
       SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
    return SDValue();

  // srl must consume the low result (value 0) and shl the high (value 1).
  SDNode *SMULLOHI = SRL.getOperand(0).getNode();
  if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
      SHL.getOperand(0) != SDValue(SMULLOHI, 1))
    return SDValue();

  // Now we have:
  // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16)))
  // For SMUL[B|T] smul_lohi will take a 32-bit and a 16-bit arguments.
  // For SMUWB the 16-bit value will signed extended somehow.
  // For SMULWT only the SRA is required.
  // Check both sides of SMUL_LOHI
  SDValue OpS16 = SMULLOHI->getOperand(0);
  SDValue OpS32 = SMULLOHI->getOperand(1);

  SelectionDAG &DAG = DCI.DAG;
  // If operand 0 isn't the 16-bit-shaped one, try the other order.
  if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
    OpS16 = OpS32;
    OpS32 = SMULLOHI->getOperand(0);
  }

  SDLoc dl(OR);
  unsigned Opcode = 0;
  if (isS16(OpS16, DAG))
    Opcode = ARMISD::SMULWB;
  else if (isSRA16(OpS16)) {
    // Top-half multiply: strip the sra and use the raw operand.
    Opcode = ARMISD::SMULWT;
    OpS16 = OpS16->getOperand(0);
  }
  else
    return SDValue();

  // Replace all uses directly and hand back the (now dead) OR node to
  // tell the combiner the transform fired.
  SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
  DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
  return SDValue(OR, 0);
}
11264 
/// PerformORCombineToBFI - Try to turn an OR of masked values into an
/// ARMISD::BFI bitfield insert.  Precondition (guaranteed by the caller,
/// PerformORCombine): N's first operand is an ISD::AND, since its operands
/// are read below without re-checking the opcode.
static SDValue PerformORCombineToBFI(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // BFI is only available on V6T2+
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  // 1) or (and A, mask), val => ARMbfi A, val, mask
  //      iff (val & mask) == val
  //
  // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
  //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
  //          && mask == ~mask2
  //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
  //          && ~mask == mask2
  //  (i.e., copy a bitfield value into another bitfield of the same width)

  if (VT != MVT::i32)
    return SDValue();

  // A: the value whose bits outside the field are kept.
  SDValue N00 = N0.getOperand(0);

  // The value and the mask need to be constants so we can verify this is
  // actually a bitfield set. If the mask is 0xffff, we can do better
  // via a movt instruction, so don't use BFI in that case.
  SDValue MaskOp = N0.getOperand(1);
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
  if (!MaskC)
    return SDValue();
  unsigned Mask = MaskC->getZExtValue();
  if (Mask == 0xffff)
    return SDValue();
  SDValue Res;
  // Case (1): or (and A, mask), val => ARMbfi A, val, mask
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N1C) {
    unsigned Val = N1C->getZExtValue();
    // val must lie entirely within the field being inserted (~Mask).
    if ((Val & ~Mask) != Val)
      return SDValue();

    if (ARM::isBitFieldInvertedMask(Mask)) {
      // Shift the value down to bit 0 for the BFI immediate operand.
      Val >>= countTrailingZeros(~Mask);

      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
                        DAG.getConstant(Val, DL, MVT::i32),
                        DAG.getConstant(Mask, DL, MVT::i32));

      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner than N is
      // now dead.
      return SDValue(N, 0);
    }
  } else if (N1.getOpcode() == ISD::AND) {
    // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned Mask2 = N11C->getZExtValue();

    // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
    // as is to match.
    if (ARM::isBitFieldInvertedMask(Mask) &&
        (Mask == ~Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasDSP() &&
          (Mask == 0xffff || Mask == 0xffff0000))
        return SDValue();
      // 2a
      unsigned amt = countTrailingZeros(Mask2);
      Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
                        DAG.getConstant(amt, DL, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
                        DAG.getConstant(Mask, DL, MVT::i32));
      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner than N is
      // now dead.
      return SDValue(N, 0);
    } else if (ARM::isBitFieldInvertedMask(~Mask) &&
               (~Mask == Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasDSP() &&
          (Mask2 == 0xffff || Mask2 == 0xffff0000))
        return SDValue();
      // 2b
      unsigned lsb = countTrailingZeros(Mask);
      Res = DAG.getNode(ISD::SRL, DL, VT, N00,
                        DAG.getConstant(lsb, DL, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
                        DAG.getConstant(Mask2, DL, MVT::i32));
      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner than N is
      // now dead.
      return SDValue(N, 0);
    }
  }

  if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
      N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
      ARM::isBitFieldInvertedMask(~Mask)) {
    // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
    // where lsb(mask) == #shamt and masked bits of B are known zero.
    SDValue ShAmt = N00.getOperand(1);
    unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
    unsigned LSB = countTrailingZeros(Mask);
    if (ShAmtC != LSB)
      return SDValue();

    Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
                      DAG.getConstant(~Mask, DL, MVT::i32));

    DCI.CombineTo(N, Res, false);
    // Return value from the original node to inform the combiner than N is
    // now dead.
    return SDValue(N, 0);
  }

  return SDValue();
}
11390 
/// PerformORCombine - Target-specific dag combine xforms for ISD::OR
static SDValue PerformORCombine(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VORR
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN && Subtarget->hasNEON() &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      EVT VorrVT;
      // Check whether the splat constant is encodable as a NEON modified
      // immediate for VORR.
      SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VorrVT, VT.is128BitVector(),
                                      OtherModImm);
      if (Val.getNode()) {
        // Bitcast to the encoding's type, emit VORRIMM, bitcast back.
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
        SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
      return Result;
    if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget))
      return Result;
  }

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
  if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
      DAG.getTargetLoweringInfo().isTypeLegal(VT)) {

    // The code below optimizes (or (and X, Y), Z).
    // The AND operand needs to have a single user to make these optimizations
    // profitable.
    if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
      return SDValue();

    APInt SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;

    APInt SplatBits0, SplatBits1;
    BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
    BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
    // Ensure that the second operand of both ands are constants
    if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
                                      HasAnyUndefs) && !HasAnyUndefs) {
        if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
                                          HasAnyUndefs) && !HasAnyUndefs) {
            // Ensure that the bit width of the constants are the same and that
            // the splat arguments are logical inverses as per the pattern we
            // are trying to simplify.
            if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
                SplatBits0 == ~SplatBits1) {
                // Canonicalize the vector type to make instruction selection
                // simpler.
                EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
                SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
                                             N0->getOperand(1),
                                             N0->getOperand(0),
                                             N1->getOperand(0));
                return DAG.getNode(ISD::BITCAST, dl, VT, Result);
            }
        }
    }
  }

  // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
  // reasonable.
  // (PerformORCombineToBFI requires N0 to be an AND; checked here.)
  if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
    if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget))
      return Res;
  }

  if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
    return Result;

  return SDValue();
}
11487 
11488 static SDValue PerformXORCombine(SDNode *N,
11489                                  TargetLowering::DAGCombinerInfo &DCI,
11490                                  const ARMSubtarget *Subtarget) {
11491   EVT VT = N->getValueType(0);
11492   SelectionDAG &DAG = DCI.DAG;
11493 
11494   if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
11495     return SDValue();
11496 
11497   if (!Subtarget->isThumb1Only()) {
11498     // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
11499     if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
11500       return Result;
11501 
11502     if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
11503       return Result;
11504   }
11505 
11506   return SDValue();
11507 }
11508 
11509 // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it,
11510 // and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and
11511 // their position in "to" (Rd).
11512 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
11513   assert(N->getOpcode() == ARMISD::BFI);
11514 
11515   SDValue From = N->getOperand(1);
11516   ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
11517   FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());
11518 
11519   // If the Base came from a SHR #C, we can deduce that it is really testing bit
11520   // #C in the base of the SHR.
11521   if (From->getOpcode() == ISD::SRL &&
11522       isa<ConstantSDNode>(From->getOperand(1))) {
11523     APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
11524     assert(Shift.getLimitedValue() < 32 && "Shift too large!");
11525     FromMask <<= Shift.getLimitedValue(31);
11526     From = From->getOperand(0);
11527   }
11528 
11529   return From;
11530 }
11531 
11532 // If A and B contain one contiguous set of bits, does A | B == A . B?
11533 //
11534 // Neither A nor B must be zero.
11535 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
11536   unsigned LastActiveBitInA =  A.countTrailingZeros();
11537   unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
11538   return LastActiveBitInA - 1 == FirstActiveBitInB;
11539 }
11540 
// FindBFIToCombineWith - Given the BFI node N, walk the chain of BFI nodes
// feeding its "to" operand and return a BFI that inserts bits from the same
// base value as N and whose inserted bits are contiguous with N's, or
// SDValue() if no such BFI can be safely combined.
static SDValue FindBFIToCombineWith(SDNode *N) {
  // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can combine with,
  // if one exists.
  APInt ToMask, FromMask;
  SDValue From = ParseBFI(N, ToMask, FromMask);
  SDValue To = N->getOperand(0);

  // Now check for a compatible BFI to merge with. We can pass through BFIs that
  // aren't compatible, but not if they set the same bit in their destination as
  // we do (or that of any BFI we're going to combine with).
  SDValue V = To;
  APInt CombinedToMask = ToMask;
  while (V.getOpcode() == ARMISD::BFI) {
    APInt NewToMask, NewFromMask;
    SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
    if (NewFrom != From) {
      // This BFI has a different base. Keep going.
      CombinedToMask |= NewToMask;
      V = V.getOperand(0);
      continue;
    }

    // Do the written bits conflict with any we've seen so far?
    if ((NewToMask & CombinedToMask).getBoolValue())
      // Conflicting bits - bail out because going further is unsafe.
      return SDValue();

    // Are the new bits contiguous when combined with the old bits?
    // Check both orders: the candidate's field may sit directly below
    // ours, or directly above it.
    if (BitsProperlyConcatenate(ToMask, NewToMask) &&
        BitsProperlyConcatenate(FromMask, NewFromMask))
      return V;
    if (BitsProperlyConcatenate(NewToMask, ToMask) &&
        BitsProperlyConcatenate(NewFromMask, FromMask))
      return V;

    // We've seen a write to some bits, so track it.
    CombinedToMask |= NewToMask;
    // Keep going...
    V = V.getOperand(0);
  }

  return SDValue();
}
11584 
/// PerformBFICombine - Target-specific dag combine xforms for ARMISD::BFI.
/// Two rewrites are attempted: (1) drop an AND feeding the inserted value
/// when the AND keeps every bit the BFI actually inserts, and (2) merge two
/// BFIs that insert adjacent bit ranges taken from the same source value.
static SDValue PerformBFICombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() == ISD::AND) {
    // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
    // the bits being cleared by the AND are not demanded by the BFI.
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    // Operand 2 is the inverted mask of the written bits; recover the
    // inserted field's LSB position and width from it.
    unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned LSB = countTrailingZeros(~InvMask);
    unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
    assert(Width <
               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
           "undefined behavior");
    unsigned Mask = (1u << Width) - 1;
    unsigned Mask2 = N11C->getZExtValue();
    // The AND is redundant when it preserves all bits the BFI demands.
    if ((Mask & (~Mask2)) == 0)
      return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
                             N->getOperand(0), N1.getOperand(0),
                             N->getOperand(2));
  } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
    // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
    // Keep track of any consecutive bits set that all come from the same base
    // value. We can combine these together into a single BFI.
    SDValue CombineBFI = FindBFIToCombineWith(N);
    if (CombineBFI == SDValue())
      return SDValue();

    // We've found a BFI.
    APInt ToMask1, FromMask1;
    SDValue From1 = ParseBFI(N, ToMask1, FromMask1);

    APInt ToMask2, FromMask2;
    SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
    assert(From1 == From2);
    (void)From2;

    // First, unlink CombineBFI.
    DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
    // Then create a new BFI, combining the two together.
    APInt NewFromMask = FromMask1 | FromMask2;
    APInt NewToMask = ToMask1 | ToMask2;

    EVT VT = N->getValueType(0);
    SDLoc dl(N);

    // If the merged source field does not start at bit 0, shift the source
    // right so the field lands in the low bits, as the BFI expects.
    if (NewFromMask[0] == 0)
      From1 = DCI.DAG.getNode(
        ISD::SRL, dl, VT, From1,
        DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
    // The BFI's mask operand is stored inverted.
    return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
                           DCI.DAG.getConstant(~NewToMask, dl, VT));
  }
  return SDValue();
}
11641 
/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
static SDValue PerformVMOVRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // vmovrrd(vmovdrr x, y) -> x,y
  SDValue InDouble = N->getOperand(0);
  if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64())
    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));

  // vmovrrd(load f64) -> (load i32), (load i32)
  // Replace a single-use, non-volatile f64 load from a frame index with two
  // i32 loads, so the value never has to pass through a VFP register.
  SDNode *InNode = InDouble.getNode();
  if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
      InNode->getValueType(0) == MVT::f64 &&
      InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
      !cast<LoadSDNode>(InNode)->isVolatile()) {
    // TODO: Should this be done for non-FrameIndex operands?
    LoadSDNode *LD = cast<LoadSDNode>(InNode);

    SelectionDAG &DAG = DCI.DAG;
    SDLoc DL(LD);
    SDValue BasePtr = LD->getBasePtr();
    // First word: same address, alignment, and flags as the original load.
    SDValue NewLD1 =
        DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
                    LD->getAlignment(), LD->getMemOperand()->getFlags());

    // Second word: 4 bytes further on, chained after the first load.
    // NOTE(review): the halved alignment looks like a conservative bound for
    // the +4 address — confirm against the original commit's intent.
    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, DL, MVT::i32));
    SDValue NewLD2 = DAG.getLoad(
        MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, LD->getPointerInfo(),
        std::min(4U, LD->getAlignment() / 2), LD->getMemOperand()->getFlags());

    // Re-route users of the old load's chain to the second new load's chain.
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
    // On big-endian targets the two result registers come out swapped.
    if (DCI.DAG.getDataLayout().isBigEndian())
      std::swap (NewLD1, NewLD2);
    SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
    return Result;
  }

  return SDValue();
}
11683 
11684 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for
11685 /// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
11686 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
11687   // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
11688   SDValue Op0 = N->getOperand(0);
11689   SDValue Op1 = N->getOperand(1);
11690   if (Op0.getOpcode() == ISD::BITCAST)
11691     Op0 = Op0.getOperand(0);
11692   if (Op1.getOpcode() == ISD::BITCAST)
11693     Op1 = Op1.getOperand(0);
11694   if (Op0.getOpcode() == ARMISD::VMOVRRD &&
11695       Op0.getNode() == Op1.getNode() &&
11696       Op0.getResNo() == 0 && Op1.getResNo() == 1)
11697     return DAG.getNode(ISD::BITCAST, SDLoc(N),
11698                        N->getValueType(0), Op0.getOperand(0));
11699   return SDValue();
11700 }
11701 
11702 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
11703 /// are normal, non-volatile loads.  If so, it is profitable to bitcast an
11704 /// i64 vector to have f64 elements, since the value can then be loaded
11705 /// directly into a VFP register.
11706 static bool hasNormalLoadOperand(SDNode *N) {
11707   unsigned NumElts = N->getValueType(0).getVectorNumElements();
11708   for (unsigned i = 0; i < NumElts; ++i) {
11709     SDNode *Elt = N->getOperand(i).getNode();
11710     if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
11711       return true;
11712   }
11713   return false;
11714 }
11715 
11716 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
11717 /// ISD::BUILD_VECTOR.
11718 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
11719                                           TargetLowering::DAGCombinerInfo &DCI,
11720                                           const ARMSubtarget *Subtarget) {
11721   // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
11722   // VMOVRRD is introduced when legalizing i64 types.  It forces the i64 value
11723   // into a pair of GPRs, which is fine when the value is used as a scalar,
11724   // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
11725   SelectionDAG &DAG = DCI.DAG;
11726   if (N->getNumOperands() == 2)
11727     if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
11728       return RV;
11729 
11730   // Load i64 elements as f64 values so that type legalization does not split
11731   // them up into i32 values.
11732   EVT VT = N->getValueType(0);
11733   if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
11734     return SDValue();
11735   SDLoc dl(N);
11736   SmallVector<SDValue, 8> Ops;
11737   unsigned NumElts = VT.getVectorNumElements();
11738   for (unsigned i = 0; i < NumElts; ++i) {
11739     SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
11740     Ops.push_back(V);
11741     // Make the DAGCombiner fold the bitcast.
11742     DCI.AddToWorklist(V.getNode());
11743   }
11744   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
11745   SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
11746   return DAG.getNode(ISD::BITCAST, dl, VT, BV);
11747 }
11748 
/// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
/// Returns a BITCAST of an i32 INSERT_VECTOR_ELT chain, or SDValue() when
/// the transformation does not apply or is not profitable.
static SDValue
PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
  // At that time, we may have inserted bitcasts from integer to float.
  // If these bitcasts have survived DAGCombine, change the lowering of this
  // BUILD_VECTOR in something more vector friendly, i.e., that does not
  // force to use floating point types.

  // Make sure we can change the type of the vector.
  // This is possible iff:
  // 1. The vector is only used in a bitcast to a integer type. I.e.,
  //    1.1. Vector is used only once.
  //    1.2. Use is a bit convert to an integer type.
  // 2. The size of its operands are 32-bits (64-bits are not legal).
  EVT VT = N->getValueType(0);
  EVT EltVT = VT.getVectorElementType();

  // Check 1.1. and 2.
  if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
    return SDValue();

  // By construction, the input type must be float.
  assert(EltVT == MVT::f32 && "Unexpected type!");

  // Check 1.2.
  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() != ISD::BITCAST ||
      Use->getValueType(0).isFloatingPoint())
    return SDValue();

  // Check profitability.
  // Model is, if more than half of the relevant operands are bitcast from
  // i32, turn the build_vector into a sequence of insert_vector_elt.
  // Relevant operands are everything that is not statically
  // (i.e., at compile time) bitcasted.
  unsigned NumOfBitCastedElts = 0;
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumOfRelevantElts = NumElts;
  for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
    SDValue Elt = N->getOperand(Idx);
    if (Elt->getOpcode() == ISD::BITCAST) {
      // Assume only bit cast to i32 will go away.
      if (Elt->getOperand(0).getValueType() == MVT::i32)
        ++NumOfBitCastedElts;
    } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
      // Constants are statically casted, thus do not count them as
      // relevant operands.
      --NumOfRelevantElts;
  }

  // Bail out unless more than half of the relevant operands come from an
  // i32 bitcast; otherwise the rewrite would not pay for itself.
  if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  // Create the new vector type.
  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
  // Check if the type is legal.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(VecVT))
    return SDValue();

  // Combine:
  // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
  // => BITCAST INSERT_VECTOR_ELT
  //                      (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
  //                      (BITCAST EN), N.
  SDValue Vec = DAG.getUNDEF(VecVT);
  SDLoc dl(N);
  for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
    SDValue V = N->getOperand(Idx);
    // Undef lanes need no insert; the UNDEF base already covers them.
    if (V.isUndef())
      continue;
    if (V.getOpcode() == ISD::BITCAST &&
        V->getOperand(0).getValueType() == MVT::i32)
      // Fold obvious case.
      V = V.getOperand(0);
    else {
      V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
      // Make the DAGCombiner fold the bitcasts.
      DCI.AddToWorklist(V.getNode());
    }
    SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
  }
  Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
  // Make the DAGCombiner fold the bitcasts.
  DCI.AddToWorklist(Vec.getNode());
  return Vec;
}
11840 
11841 /// PerformInsertEltCombine - Target-specific dag combine xforms for
11842 /// ISD::INSERT_VECTOR_ELT.
11843 static SDValue PerformInsertEltCombine(SDNode *N,
11844                                        TargetLowering::DAGCombinerInfo &DCI) {
11845   // Bitcast an i64 load inserted into a vector to f64.
11846   // Otherwise, the i64 value will be legalized to a pair of i32 values.
11847   EVT VT = N->getValueType(0);
11848   SDNode *Elt = N->getOperand(1).getNode();
11849   if (VT.getVectorElementType() != MVT::i64 ||
11850       !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
11851     return SDValue();
11852 
11853   SelectionDAG &DAG = DCI.DAG;
11854   SDLoc dl(N);
11855   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
11856                                  VT.getVectorNumElements());
11857   SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
11858   SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
11859   // Make the DAGCombiner fold the bitcasts.
11860   DCI.AddToWorklist(Vec.getNode());
11861   DCI.AddToWorklist(V.getNode());
11862   SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
11863                                Vec, V, N->getOperand(2));
11864   return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
11865 }
11866 
/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
/// ISD::VECTOR_SHUFFLE.
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
  // The LLVM shufflevector instruction does not require the shuffle mask
  // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
  // have that requirement.  When translating to ISD::VECTOR_SHUFFLE, if the
  // operands do not match the mask length, they are extended by concatenating
  // them with undef vectors.  That is probably the right thing for other
  // targets, but for NEON it is better to concatenate two double-register
  // size vector operands into a single quad-register size vector.  Do that
  // transformation here:
  //   shuffle(concat(v1, undef), concat(v2, undef)) ->
  //   shuffle(concat(v1, v2), undef)
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  // Both inputs must be two-operand concats.
  if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
      Op1.getOpcode() != ISD::CONCAT_VECTORS ||
      Op0.getNumOperands() != 2 ||
      Op1.getNumOperands() != 2)
    return SDValue();
  // ... and the upper half of each concat must be undef.
  SDValue Concat0Op1 = Op0.getOperand(1);
  SDValue Concat1Op1 = Op1.getOperand(1);
  if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
    return SDValue();
  // Skip the transformation if any of the types are illegal.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = N->getValueType(0);
  if (!TLI.isTypeLegal(VT) ||
      !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
      !TLI.isTypeLegal(Concat1Op1.getValueType()))
    return SDValue();

  // Pack the two real halves into a single concat.
  SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
                                  Op0.getOperand(0), Op1.getOperand(0));
  // Translate the shuffle mask.  Elements from v1 (the low half of Op0) keep
  // their index; elements from v2 (the low half of Op1, previously indexed at
  // NumElts..NumElts+HalfElts-1) move to the top half of the new concat.
  // Anything else referenced an undef half and becomes -1 (undefined).
  SmallVector<int, 16> NewMask;
  unsigned NumElts = VT.getVectorNumElements();
  unsigned HalfElts = NumElts/2;
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
  for (unsigned n = 0; n < NumElts; ++n) {
    int MaskElt = SVN->getMaskElt(n);
    int NewElt = -1;
    if (MaskElt < (int)HalfElts)
      NewElt = MaskElt;
    else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
      NewElt = HalfElts + MaskElt - NumElts;
    NewMask.push_back(NewElt);
  }
  return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
                              DAG.getUNDEF(VT), NewMask);
}
11918 
/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
/// NEON load/store intrinsics, and generic vector load/stores, to merge
/// base address updates.
/// I.e. fold an (add Addr, Inc) that post-dates the memory access into the
/// access itself, producing the corresponding ..._UPD node.
/// For generic load/stores, the memory type is assumed to be a vector.
/// The caller is assumed to have checked legality.
static SDValue CombineBaseUpdate(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
                            N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
  const bool isStore = N->getOpcode() == ISD::STORE;
  // Intrinsics and stores carry an extra leading operand before the address.
  const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
  SDValue Addr = N->getOperand(AddrOpIdx);
  MemSDNode *MemN = cast<MemSDNode>(N);
  SDLoc dl(N);

  // Search for a use of the address operand that is an increment.
  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
         UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User->getOpcode() != ISD::ADD ||
        UI.getUse().getResNo() != Addr.getResNo())
      continue;

    // Check that the add is independent of the load/store.  Otherwise, folding
    // it would create a cycle. We can avoid searching through Addr as it's a
    // predecessor to both.
    SmallPtrSet<const SDNode *, 32> Visited;
    SmallVector<const SDNode *, 16> Worklist;
    Visited.insert(Addr.getNode());
    Worklist.push_back(N);
    Worklist.push_back(User);
    if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
        SDNode::hasPredecessorHelper(User, Visited, Worklist))
      continue;

    // Find the new opcode for the updating load/store.
    bool isLoadOp = true;
    bool isLaneOp = false;
    unsigned NewOpc = 0;
    unsigned NumVecs = 0;
    if (isIntrinsic) {
      unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
      switch (IntNo) {
      default: llvm_unreachable("unexpected intrinsic for Neon base update");
      case Intrinsic::arm_neon_vld1:     NewOpc = ARMISD::VLD1_UPD;
        NumVecs = 1; break;
      case Intrinsic::arm_neon_vld2:     NewOpc = ARMISD::VLD2_UPD;
        NumVecs = 2; break;
      case Intrinsic::arm_neon_vld3:     NewOpc = ARMISD::VLD3_UPD;
        NumVecs = 3; break;
      case Intrinsic::arm_neon_vld4:     NewOpc = ARMISD::VLD4_UPD;
        NumVecs = 4; break;
      case Intrinsic::arm_neon_vld2dup:
      case Intrinsic::arm_neon_vld3dup:
      case Intrinsic::arm_neon_vld4dup:
        // TODO: Support updating VLDxDUP nodes. For now, we just skip
        // combining base updates for such intrinsics.
        continue;
      case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
        NumVecs = 2; isLaneOp = true; break;
      case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
        NumVecs = 3; isLaneOp = true; break;
      case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
        NumVecs = 4; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst1:     NewOpc = ARMISD::VST1_UPD;
        NumVecs = 1; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst2:     NewOpc = ARMISD::VST2_UPD;
        NumVecs = 2; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst3:     NewOpc = ARMISD::VST3_UPD;
        NumVecs = 3; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst4:     NewOpc = ARMISD::VST4_UPD;
        NumVecs = 4; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
        NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
        NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
        NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
      }
    } else {
      isLaneOp = true;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode for Neon base update");
      case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break;
      case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
      case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
      case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
      case ISD::LOAD:       NewOpc = ARMISD::VLD1_UPD;
        NumVecs = 1; isLaneOp = false; break;
      case ISD::STORE:      NewOpc = ARMISD::VST1_UPD;
        NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
      }
    }

    // Find the size of memory referenced by the load/store.
    EVT VecTy;
    if (isLoadOp) {
      VecTy = N->getValueType(0);
    } else if (isIntrinsic) {
      VecTy = N->getOperand(AddrOpIdx+1).getValueType();
    } else {
      assert(isStore && "Node has to be a load, a store, or an intrinsic!");
      VecTy = N->getOperand(1).getValueType();
    }

    unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
    // Lane ops touch only one element per vector.
    if (isLaneOp)
      NumBytes /= VecTy.getVectorNumElements();

    // If the increment is a constant, it must match the memory ref size.
    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
    ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
    if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) {
      // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
      // separate instructions that make it harder to use a non-constant update.
      continue;
    }

    // OK, we found an ADD we can fold into the base update.
    // Now, create a _UPD node, taking care of not breaking alignment.

    EVT AlignedVecTy = VecTy;
    unsigned Alignment = MemN->getAlignment();

    // If this is a less-than-standard-aligned load/store, change the type to
    // match the standard alignment.
    // The alignment is overlooked when selecting _UPD variants; and it's
    // easier to introduce bitcasts here than fix that.
    // There are 3 ways to get to this base-update combine:
    // - intrinsics: they are assumed to be properly aligned (to the standard
    //   alignment of the memory type), so we don't need to do anything.
    // - ARMISD::VLDx nodes: they are only generated from the aforementioned
    //   intrinsics, so, likewise, there's nothing to do.
    // - generic load/store instructions: the alignment is specified as an
    //   explicit operand, rather than implicitly as the standard alignment
    //   of the memory type (like the intrisics).  We need to change the
    //   memory type to match the explicit alignment.  That way, we don't
    //   generate non-standard-aligned ARMISD::VLDx nodes.
    if (isa<LSBaseSDNode>(N)) {
      if (Alignment == 0)
        Alignment = 1;
      if (Alignment < VecTy.getScalarSizeInBits() / 8) {
        MVT EltTy = MVT::getIntegerVT(Alignment * 8);
        assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
        assert(!isLaneOp && "Unexpected generic load/store lane.");
        unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
        AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
      }
      // Don't set an explicit alignment on regular load/stores that we want
      // to transform to VLD/VST 1_UPD nodes.
      // This matches the behavior of regular load/stores, which only get an
      // explicit alignment if the MMO alignment is larger than the standard
      // alignment of the memory type.
      // Intrinsics, however, always get an explicit alignment, set to the
      // alignment of the MMO.
      Alignment = 1;
    }

    // Create the new updating load/store node.
    // First, create an SDVTList for the new updating node's results.
    // Result layout: NumResultVecs vector values, the written-back address
    // (i32), and the chain.
    EVT Tys[6];
    unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
    unsigned n;
    for (n = 0; n < NumResultVecs; ++n)
      Tys[n] = AlignedVecTy;
    Tys[n++] = MVT::i32;
    Tys[n] = MVT::Other;
    SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));

    // Then, gather the new node's operands.
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(N->getOperand(0)); // incoming chain
    Ops.push_back(N->getOperand(AddrOpIdx));
    Ops.push_back(Inc);

    if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
      // Try to match the intrinsic's signature
      Ops.push_back(StN->getValue());
    } else {
      // Loads (and of course intrinsics) match the intrinsics' signature,
      // so just add all but the alignment operand.
      for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i)
        Ops.push_back(N->getOperand(i));
    }

    // For all node types, the alignment operand is always the last one.
    Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));

    // If this is a non-standard-aligned STORE, the penultimate operand is the
    // stored value.  Bitcast it to the aligned type.
    if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
      SDValue &StVal = Ops[Ops.size()-2];
      StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
    }

    // Lane ops access a single element, so the memory VT is the element type.
    EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy;
    SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT,
                                           MemN->getMemOperand());

    // Update the uses.
    SmallVector<SDValue, 5> NewResults;
    for (unsigned i = 0; i < NumResultVecs; ++i)
      NewResults.push_back(SDValue(UpdN.getNode(), i));

    // If this is an non-standard-aligned LOAD, the first result is the loaded
    // value.  Bitcast it to the expected result type.
    if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
      SDValue &LdVal = NewResults[0];
      LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
    }

    NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
    // Replace the original access with the updating form, and route the
    // ADD's users to the new node's written-back address result.
    DCI.CombineTo(N, NewResults);
    DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));

    break;
  }
  return SDValue();
}
12139 
12140 static SDValue PerformVLDCombine(SDNode *N,
12141                                  TargetLowering::DAGCombinerInfo &DCI) {
12142   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
12143     return SDValue();
12144 
12145   return CombineBaseUpdate(N, DCI);
12146 }
12147 
/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
/// are also VDUPLANEs.  If so, combine them to a vldN-dup operation and
/// return true.
static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  // vldN-dup instructions only support 64-bit vectors for N > 1.
  if (!VT.is64BitVector())
    return false;

  // Check if the VDUPLANE operand is a vldN-dup intrinsic.
  SDNode *VLD = N->getOperand(0).getNode();
  if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
    return false;
  unsigned NumVecs = 0;
  unsigned NewOpc = 0;
  // For INTRINSIC_W_CHAIN, operand 0 is the chain and operand 1 is the
  // intrinsic ID.
  unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
  if (IntNo == Intrinsic::arm_neon_vld2lane) {
    NumVecs = 2;
    NewOpc = ARMISD::VLD2DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
    NumVecs = 3;
    NewOpc = ARMISD::VLD3DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
    NumVecs = 4;
    NewOpc = ARMISD::VLD4DUP;
  } else {
    return false;
  }

  // First check that all the vldN-lane uses are VDUPLANEs and that the lane
  // numbers match the load.
  // Operand layout of a vldN-lane intrinsic here is: chain, intrinsic-id,
  // addr, NumVecs source vectors, lane number, alignment -- so the lane
  // number is operand NumVecs+3.
  unsigned VLDLaneNo =
    cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
       UI != UE; ++UI) {
    // Ignore uses of the chain result.
    if (UI.getUse().getResNo() == NumVecs)
      continue;
    SDNode *User = *UI;
    if (User->getOpcode() != ARMISD::VDUPLANE ||
        VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
      return false;
  }

  // Create the vldN-dup node.
  // Result types: NumVecs vectors of type VT, followed by the chain.
  EVT Tys[5];
  unsigned n;
  for (n = 0; n < NumVecs; ++n)
    Tys[n] = VT;
  Tys[n] = MVT::Other;
  SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1));
  // The dup node only needs the original chain and the address operand.
  SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
  MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
  SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
                                           Ops, VLDMemInt->getMemoryVT(),
                                           VLDMemInt->getMemOperand());

  // Update the uses.
  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
       UI != UE; ++UI) {
    unsigned ResNo = UI.getUse().getResNo();
    // Ignore uses of the chain result.
    if (ResNo == NumVecs)
      continue;
    SDNode *User = *UI;
    // Replace each VDUPLANE user with the corresponding vldN-dup result.
    DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
  }

  // Now the vldN-lane intrinsic is dead except for its chain result.
  // Update uses of the chain.
  std::vector<SDValue> VLDDupResults;
  for (unsigned n = 0; n < NumVecs; ++n)
    VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
  VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
  DCI.CombineTo(VLD, VLDDupResults);

  return true;
}
12228 
/// PerformVDUPLANECombine - Target-specific dag combine xforms for
/// ARMISD::VDUPLANE.
static SDValue PerformVDUPLANECombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI) {
  SDValue Op = N->getOperand(0);

  // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
  // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
  if (CombineVLDDUP(N, DCI))
    return SDValue(N, 0);

  // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
  // redundant.  Ignore bit_converts for now; element sizes are checked below.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
    return SDValue();

  // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
  unsigned EltSize = Op.getScalarValueSizeInBits();
  // The canonical VMOV for a zero vector uses a 32-bit element size.
  unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned EltBits;
  // A decoded modified-immediate value of 0 is the all-zeros splat, which is
  // equally valid as a splat of 8-bit elements.
  if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
    EltSize = 8;
  EVT VT = N->getValueType(0);
  if (EltSize > VT.getScalarSizeInBits())
    return SDValue();

  // The immediate splat already occupies every lane; a bitcast to the
  // VDUPLANE result type is all that is needed.
  return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}
12260 
/// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
static SDValue PerformVDUPCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op = N->getOperand(0);

  if (!Subtarget->hasNEON())
    return SDValue();

  // Match VDUP(LOAD) -> VLD1DUP.
  // We match this pattern here rather than waiting for isel because the
  // transform is only legal for unindexed loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
  if (LD && Op.hasOneUse() && LD->isUnindexed() &&
      LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
    // VLD1DUP operands: chain, address, alignment immediate.
    SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
                      DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) };
    SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
    SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys,
                                             Ops, LD->getMemoryVT(),
                                             LD->getMemOperand());
    // Redirect users of the load's chain to the new node's chain so the
    // original load becomes dead.
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
    return VLDDup;
  }

  return SDValue();
}
12289 
12290 static SDValue PerformLOADCombine(SDNode *N,
12291                                   TargetLowering::DAGCombinerInfo &DCI) {
12292   EVT VT = N->getValueType(0);
12293 
12294   // If this is a legal vector load, try to combine it into a VLD1_UPD.
12295   if (ISD::isNormalLoad(N) && VT.isVector() &&
12296       DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
12297     return CombineBaseUpdate(N, DCI);
12298 
12299   return SDValue();
12300 }
12301 
/// PerformSTORECombine - Target-specific dag combine xforms for
/// ISD::STORE.
static SDValue PerformSTORECombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  // Volatile stores must not be split or rewritten.
  if (St->isVolatile())
    return SDValue();

  // Optimize trunc store (of multiple scalars) to shuffle and store.  First,
  // pack all of the elements in one place.  Next, store to memory in fewer
  // chunks.
  SDValue StVal = St->getValue();
  EVT VT = StVal.getValueType();
  if (St->isTruncatingStore() && VT.isVector()) {
    SelectionDAG &DAG = DCI.DAG;
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    EVT StVT = St->getMemoryVT();
    unsigned NumElems = VT.getVectorNumElements();
    assert(StVT != VT && "Cannot truncate to the same type");
    unsigned FromEltSz = VT.getScalarSizeInBits();
    unsigned ToEltSz = StVT.getScalarSizeInBits();

    // From, To sizes and ElemCount must be pow of two
    if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();

    // We are going to use the original vector elt for storing.
    // Accumulated smaller vector elements must be a multiple of the store size.
    if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue();

    unsigned SizeRatio  = FromEltSz / ToEltSz;
    assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());

    // Create a type on which we perform the shuffle.
    EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
                                     NumElems*SizeRatio);
    assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

    SDLoc DL(St);
    SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
    SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
    // Each original element is viewed as SizeRatio narrow sub-elements; pick
    // the sub-element that carries the truncated value (the last one on
    // big-endian targets, the first one on little-endian).
    for (unsigned i = 0; i < NumElems; ++i)
      ShuffleVec[i] = DAG.getDataLayout().isBigEndian()
                          ? (i + 1) * SizeRatio - 1
                          : i * SizeRatio;

    // Can't shuffle using an illegal type.
    if (!TLI.isTypeLegal(WideVecVT)) return SDValue();

    SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec,
                                DAG.getUNDEF(WideVec.getValueType()),
                                ShuffleVec);
    // At this point all of the data is stored at the bottom of the
    // register. We now need to save it to mem.

    // Find the largest store unit
    MVT StoreType = MVT::i8;
    for (MVT Tp : MVT::integer_valuetypes()) {
      if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
        StoreType = Tp;
    }
    // Didn't find a legal store type.
    if (!TLI.isTypeLegal(StoreType))
      return SDValue();

    // Bitcast the original vector into a vector of store-size units
    EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
            StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
    assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
    SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
    SmallVector<SDValue, 8> Chains;
    SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
                                        TLI.getPointerTy(DAG.getDataLayout()));
    SDValue BasePtr = St->getBasePtr();

    // Perform one or more big stores into memory.
    unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits();
    for (unsigned I = 0; I < E; I++) {
      SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                                   StoreType, ShuffWide,
                                   DAG.getIntPtrConstant(I, DL));
      SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
                                St->getPointerInfo(), St->getAlignment(),
                                St->getMemOperand()->getFlags());
      // Advance the base pointer by one store unit for the next chunk.
      BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
                            Increment);
      Chains.push_back(Ch);
    }
    // Tie all the partial store chains together.
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (!ISD::isNormalStore(St))
    return SDValue();

  // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
  // ARM stores of arguments in the same cache line.
  if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
      StVal.getNode()->hasOneUse()) {
    SelectionDAG  &DAG = DCI.DAG;
    bool isBigEndian = DAG.getDataLayout().isBigEndian();
    SDLoc DL(St);
    SDValue BasePtr = St->getBasePtr();
    // The operand stored at the original address is chosen by endianness so
    // the in-memory layout matches the original 64-bit store.
    SDValue NewST1 = DAG.getStore(
        St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
        BasePtr, St->getPointerInfo(), St->getAlignment(),
        St->getMemOperand()->getFlags());

    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, DL, MVT::i32));
    // The second word is 4 bytes further on; its alignment is capped at 4 and
    // at half the original store's alignment.
    return DAG.getStore(NewST1.getValue(0), DL,
                        StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
                        OffsetPtr, St->getPointerInfo(),
                        std::min(4U, St->getAlignment() / 2),
                        St->getMemOperand()->getFlags());
  }

  if (StVal.getValueType() == MVT::i64 &&
      StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

    // Bitcast an i64 store extracted from a vector to f64.
    // Otherwise, the i64 value will be legalized to a pair of i32 values.
    SelectionDAG &DAG = DCI.DAG;
    SDLoc dl(StVal);
    SDValue IntVec = StVal.getOperand(0);
    EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
                                   IntVec.getValueType().getVectorNumElements());
    SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
    SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                 Vec, StVal.getOperand(1));
    dl = SDLoc(N);
    SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
    // Make the DAGCombiner fold the bitcasts.
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(ExtElt.getNode());
    DCI.AddToWorklist(V.getNode());
    return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags(), St->getAAInfo());
  }

  // If this is a legal vector store, try to combine it into a VST1_UPD.
  if (ISD::isNormalStore(N) && VT.isVector() &&
      DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return CombineBaseUpdate(N, DCI);

  return SDValue();
}
12448 
/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
/// can replace combinations of VMUL and VCVT (floating-point to integer)
/// when the VMUL has a constant operand that is a power of 2.
///
/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
///  vmul.f32        d16, d17, d16
///  vcvt.s32.f32    d16, d16
/// becomes:
///  vcvt.s32.f32    d16, d16, #3
static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();

  // The conversion's source must be a simple-typed vector FMUL.
  SDValue Op = N->getOperand(0);
  if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
      Op.getOpcode() != ISD::FMUL)
    return SDValue();

  // The multiplier must be a build_vector constant.
  SDValue ConstVec = Op->getOperand(1);
  if (!isa<BuildVectorSDNode>(ConstVec))
    return SDValue();

  MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
  uint32_t FloatBits = FloatTy.getSizeInBits();
  MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
  uint32_t IntBits = IntTy.getSizeInBits();
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
    // These instructions only exist converting from f32 to i32. We can handle
    // smaller integers by generating an extra truncate, but larger ones would
    // be lossy. We also can't handle anything other than 2 or 4 lanes, since
    // these instructions only support v2i32/v4i32 types.
    return SDValue();
  }

  // The multiplier must splat to a power of two 2^C with 1 <= C <= 32, since
  // C becomes the fixed-point #fbits immediate of the VCVT instruction.
  BitVector UndefElements;
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
  bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
    Intrinsic::arm_neon_vcvtfp2fxu;
  SDValue FixConv = DAG.getNode(
      ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
      DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
      DAG.getConstant(C, dl, MVT::i32));

  // Narrow the i32 result back down if the original conversion produced a
  // smaller integer element type.
  if (IntBits < FloatBits)
    FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);

  return FixConv;
}
12505 
/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
/// can replace combinations of VCVT (integer to floating-point) and VDIV
/// when the VDIV has a constant operand that is a power of 2.
///
/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
///  vcvt.f32.s32    d16, d16
///  vdiv.f32        d16, d17, d16
/// becomes:
///  vcvt.f32.s32    d16, d16, #3
static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();

  // The dividend must itself be an int-to-fp conversion of a simple-typed
  // vector.
  SDValue Op = N->getOperand(0);
  unsigned OpOpcode = Op.getNode()->getOpcode();
  if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
      (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
    return SDValue();

  // The divisor must be a build_vector constant.
  SDValue ConstVec = N->getOperand(1);
  if (!isa<BuildVectorSDNode>(ConstVec))
    return SDValue();

  MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
  uint32_t FloatBits = FloatTy.getSizeInBits();
  MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
  uint32_t IntBits = IntTy.getSizeInBits();
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
    // These instructions only exist converting from i32 to f32. We can handle
    // smaller integers by generating an extra extend, but larger ones would
    // be lossy. We also can't handle anything other than 2 or 4 lanes, since
    // these instructions only support v2i32/v4i32 types.
    return SDValue();
  }

  // The divisor must splat to a power of two 2^C with 1 <= C <= 32, since C
  // becomes the fixed-point #fbits immediate of the VCVT instruction.
  BitVector UndefElements;
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
  bool isSigned = OpOpcode == ISD::SINT_TO_FP;
  SDValue ConvInput = Op.getOperand(0);
  // Widen narrower integer inputs up to i32 lanes, matching signedness.
  if (IntBits < FloatBits)
    ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                            dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
                            ConvInput);

  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
    Intrinsic::arm_neon_vcvtfxu2fp;
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
                     Op.getValueType(),
                     DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
                     ConvInput, DAG.getConstant(C, dl, MVT::i32));
}
12564 
12565 /// Getvshiftimm - Check if this is a valid build_vector for the immediate
12566 /// operand of a vector shift operation, where all the elements of the
12567 /// build_vector must have the same constant integer value.
12568 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
12569   // Ignore bit_converts.
12570   while (Op.getOpcode() == ISD::BITCAST)
12571     Op = Op.getOperand(0);
12572   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
12573   APInt SplatBits, SplatUndef;
12574   unsigned SplatBitSize;
12575   bool HasAnyUndefs;
12576   if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
12577                                       HasAnyUndefs, ElementBits) ||
12578       SplatBitSize > ElementBits)
12579     return false;
12580   Cnt = SplatBits.getSExtValue();
12581   return true;
12582 }
12583 
12584 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
12585 /// operand of a vector shift left operation.  That value must be in the range:
12586 ///   0 <= Value < ElementBits for a left shift; or
12587 ///   0 <= Value <= ElementBits for a long left shift.
12588 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
12589   assert(VT.isVector() && "vector shift count is not a vector type");
12590   int64_t ElementBits = VT.getScalarSizeInBits();
12591   if (! getVShiftImm(Op, ElementBits, Cnt))
12592     return false;
12593   return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
12594 }
12595 
12596 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
12597 /// operand of a vector shift right operation.  For a shift opcode, the value
12598 /// is positive, but for an intrinsic the value count must be negative. The
12599 /// absolute value must be in the range:
12600 ///   1 <= |Value| <= ElementBits for a right shift; or
12601 ///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
12602 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
12603                          int64_t &Cnt) {
12604   assert(VT.isVector() && "vector shift count is not a vector type");
12605   int64_t ElementBits = VT.getScalarSizeInBits();
12606   if (! getVShiftImm(Op, ElementBits, Cnt))
12607     return false;
12608   if (!isIntrinsic)
12609     return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
12610   if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) {
12611     Cnt = -Cnt;
12612     return true;
12613   }
12614   return false;
12615 }
12616 
/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
  // For INTRINSIC_WO_CHAIN nodes, operand 0 is the intrinsic ID.
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;

  // Vector shifts: check for immediate versions and lower them.
  // Note: This is done during DAG combining instead of DAG legalizing because
  // the build_vectors for 64-bit vector element shift counts are generally
  // not legal, and it is hard to see their values after they get legalized to
  // loads from a constant pool.
  case Intrinsic::arm_neon_vshifts:
  case Intrinsic::arm_neon_vshiftu:
  case Intrinsic::arm_neon_vrshifts:
  case Intrinsic::arm_neon_vrshiftu:
  case Intrinsic::arm_neon_vrshiftn:
  case Intrinsic::arm_neon_vqshifts:
  case Intrinsic::arm_neon_vqshiftu:
  case Intrinsic::arm_neon_vqshiftsu:
  case Intrinsic::arm_neon_vqshiftns:
  case Intrinsic::arm_neon_vqshiftnu:
  case Intrinsic::arm_neon_vqshiftnsu:
  case Intrinsic::arm_neon_vqrshiftns:
  case Intrinsic::arm_neon_vqrshiftnu:
  case Intrinsic::arm_neon_vqrshiftnsu: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    // First switch: validate that operand 2 is a legal immediate shift count
    // for this intrinsic (and for vshifts/vshiftu, also pick the opcode,
    // since those can be either left or right shifts).
    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
        VShiftOpc = ARMISD::VSHL;
        break;
      }
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
                     ARMISD::VSHRs : ARMISD::VSHRu);
        break;
      }
      return SDValue();

    case Intrinsic::arm_neon_vrshifts:
    case Intrinsic::arm_neon_vrshiftu:
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshifts:
    case Intrinsic::arm_neon_vqshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshiftsu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      llvm_unreachable("invalid shift count for vqshlu intrinsic");

    case Intrinsic::arm_neon_vrshiftn:
    case Intrinsic::arm_neon_vqshiftns:
    case Intrinsic::arm_neon_vqshiftnu:
    case Intrinsic::arm_neon_vqshiftnsu:
    case Intrinsic::arm_neon_vqrshiftns:
    case Intrinsic::arm_neon_vqrshiftnu:
    case Intrinsic::arm_neon_vqrshiftnsu:
      // Narrowing shifts require an immediate right shift.
      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for narrowing vector shift "
                       "intrinsic");

    default:
      llvm_unreachable("unhandled vector shift");
    }

    // Second switch: map the intrinsic to the corresponding ARMISD opcode.
    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      // Opcode already set above.
      break;
    case Intrinsic::arm_neon_vrshifts:
      VShiftOpc = ARMISD::VRSHRs; break;
    case Intrinsic::arm_neon_vrshiftu:
      VShiftOpc = ARMISD::VRSHRu; break;
    case Intrinsic::arm_neon_vrshiftn:
      VShiftOpc = ARMISD::VRSHRN; break;
    case Intrinsic::arm_neon_vqshifts:
      VShiftOpc = ARMISD::VQSHLs; break;
    case Intrinsic::arm_neon_vqshiftu:
      VShiftOpc = ARMISD::VQSHLu; break;
    case Intrinsic::arm_neon_vqshiftsu:
      VShiftOpc = ARMISD::VQSHLsu; break;
    case Intrinsic::arm_neon_vqshiftns:
      VShiftOpc = ARMISD::VQSHRNs; break;
    case Intrinsic::arm_neon_vqshiftnu:
      VShiftOpc = ARMISD::VQSHRNu; break;
    case Intrinsic::arm_neon_vqshiftnsu:
      VShiftOpc = ARMISD::VQSHRNsu; break;
    case Intrinsic::arm_neon_vqrshiftns:
      VShiftOpc = ARMISD::VQRSHRNs; break;
    case Intrinsic::arm_neon_vqrshiftnu:
      VShiftOpc = ARMISD::VQRSHRNu; break;
    case Intrinsic::arm_neon_vqrshiftnsu:
      VShiftOpc = ARMISD::VQRSHRNsu; break;
    }

    SDLoc dl(N);
    return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
                       N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
  }

  case Intrinsic::arm_neon_vshiftins: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    // Shift-and-insert: a left count selects VSLI, a (negated) right count
    // selects VSRI; anything else is malformed IR.
    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
      VShiftOpc = ARMISD::VSLI;
    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
      VShiftOpc = ARMISD::VSRI;
    else {
      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
    }

    SDLoc dl(N);
    return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       DAG.getConstant(Cnt, dl, MVT::i32));
  }

  case Intrinsic::arm_neon_vqrshifts:
  case Intrinsic::arm_neon_vqrshiftu:
    // No immediate versions of these to check for.
    break;
  }

  return SDValue();
}
12759 
/// PerformShiftCombine - Checks for immediate versions of vector shifts and
/// lowers them.  As with the vector shift intrinsics, this is done during DAG
/// combining instead of DAG legalizing because the build_vectors for 64-bit
/// vector element shift counts are generally not legal, and it is hard to see
/// their values after they get legalized to loads from a constant pool.
static SDValue PerformShiftCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *ST) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
    // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
    // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
    SDValue N1 = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      SDValue N0 = N->getOperand(0);
      if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
          DAG.MaskedValueIsZero(N0.getOperand(0),
                                APInt::getHighBitsSet(32, 16)))
        return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
    }
  }

  if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 &&
      N->getOperand(0)->getOpcode() == ISD::AND &&
      N->getOperand(0)->hasOneUse()) {
    // Only perform this transform after legalization, and not when invoked
    // from within the legalizer.
    if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
      return SDValue();
    // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't
    // usually show up because instcombine prefers to canonicalize it to
    // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come
    // out of GEP lowering in some cases.
    SDValue N0 = N->getOperand(0);
    ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!ShiftAmtNode)
      return SDValue();
    uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue());
    ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!AndMaskNode)
      return SDValue();
    uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue());
    // Don't transform uxtb/uxth.
    if (AndMask == 255 || AndMask == 65535)
      return SDValue();
    if (isMask_32(AndMask)) {
      uint32_t MaskedBits = countLeadingZeros(AndMask);
      if (MaskedBits > ShiftAmt) {
        // Rewrite as (srl (shl x, MaskedBits), MaskedBits - ShiftAmt): the
        // left shift clears the masked-off high bits, the right shift places
        // the value at the requested position.
        SDLoc DL(N);
        SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                  DAG.getConstant(MaskedBits, DL, MVT::i32));
        return DAG.getNode(
            ISD::SRL, DL, MVT::i32, SHL,
            DAG.getConstant(MaskedBits - ShiftAmt, DL, MVT::i32));
      }
    }
  }

  // Nothing to be done for scalar shifts.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!VT.isVector() || !TLI.isTypeLegal(VT))
    return SDValue();

  assert(ST->hasNEON() && "unexpected vector shift");
  int64_t Cnt;

  // Lower vector shifts by a splatted immediate to the ARMISD forms.
  switch (N->getOpcode()) {
  default: llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
      SDLoc dl(N);
      return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
    break;

  case ISD::SRA:
  case ISD::SRL:
    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
      unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
                            ARMISD::VSHRs : ARMISD::VSHRu);
      SDLoc dl(N);
      return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
  }
  return SDValue();
}
12848 
12849 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
12850 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
12851 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
12852                                     const ARMSubtarget *ST) {
12853   SDValue N0 = N->getOperand(0);
12854 
12855   // Check for sign- and zero-extensions of vector extract operations of 8-
12856   // and 16-bit vector elements.  NEON supports these directly.  They are
12857   // handled during DAG combining because type legalization will promote them
12858   // to 32-bit types and it is messy to recognize the operations after that.
12859   if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
12860     SDValue Vec = N0.getOperand(0);
12861     SDValue Lane = N0.getOperand(1);
12862     EVT VT = N->getValueType(0);
12863     EVT EltVT = N0.getValueType();
12864     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12865 
12866     if (VT == MVT::i32 &&
12867         (EltVT == MVT::i8 || EltVT == MVT::i16) &&
12868         TLI.isTypeLegal(Vec.getValueType()) &&
12869         isa<ConstantSDNode>(Lane)) {
12870 
12871       unsigned Opc = 0;
12872       switch (N->getOpcode()) {
12873       default: llvm_unreachable("unexpected opcode");
12874       case ISD::SIGN_EXTEND:
12875         Opc = ARMISD::VGETLANEs;
12876         break;
12877       case ISD::ZERO_EXTEND:
12878       case ISD::ANY_EXTEND:
12879         Opc = ARMISD::VGETLANEu;
12880         break;
12881       }
12882       return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
12883     }
12884   }
12885 
12886   return SDValue();
12887 }
12888 
12889 static const APInt *isPowerOf2Constant(SDValue V) {
12890   ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
12891   if (!C)
12892     return nullptr;
12893   const APInt *CV = &C->getAPIntValue();
12894   return CV->isPowerOf2() ? CV : nullptr;
12895 }
12896 
/// Try to turn a CMOV of (y | CM) guarded by a single-bit test (x & CN) into
/// a sequence of ARMISD::BFI nodes. Called from PerformCMOVCombine, which
/// only invokes this when operand 4 of the CMOV is an ARMISD::CMPZ.
SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
  // If we have a CMOV, OR and AND combination such as:
  //   if (x & CN)
  //     y |= CM;
  //
  // And:
  //   * CN is a single bit;
  //   * All bits covered by CM are known zero in y
  //
  // Then we can convert this into a sequence of BFI instructions. This will
  // always be a win if CM is a single bit, will always be no worse than the
  // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
  // three bits (due to the extra IT instruction).

  SDValue Op0 = CMOV->getOperand(0);
  SDValue Op1 = CMOV->getOperand(1);
  auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
  auto CC = CCNode->getAPIntValue().getLimitedValue();
  SDValue CmpZ = CMOV->getOperand(4);

  // The compare must be against zero.
  if (!isNullConstant(CmpZ->getOperand(1)))
    return SDValue();

  assert(CmpZ->getOpcode() == ARMISD::CMPZ);
  // The compared value must be (x & CN) with CN a power of two.
  SDValue And = CmpZ->getOperand(0);
  if (And->getOpcode() != ISD::AND)
    return SDValue();
  const APInt *AndC = isPowerOf2Constant(And->getOperand(1));
  if (!AndC)
    return SDValue();
  SDValue X = And->getOperand(0);

  if (CC == ARMCC::EQ) {
    // We're performing an "equal to zero" compare. Swap the operands so we
    // canonicalize on a "not equal to zero" compare.
    std::swap(Op0, Op1);
  } else {
    assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
  }

  // The taken value must be (y | CM).
  if (Op1->getOpcode() != ISD::OR)
    return SDValue();

  ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
  if (!OrC)
    return SDValue();
  SDValue Y = Op1->getOperand(0);

  // The not-taken value must be y itself (the CMOV selects y or y|CM).
  if (Op0 != Y)
    return SDValue();

  // Now, is it profitable to continue?
  APInt OrCI = OrC->getAPIntValue();
  // One BFI per set bit in CM; Thumb can afford one more due to IT overhead
  // in the alternative TST&OR sequence.
  unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
  if (OrCI.countPopulation() > Heuristic)
    return SDValue();

  // Lastly, can we determine that the bits defined by OrCI
  // are zero in Y?
  KnownBits Known = DAG.computeKnownBits(Y);
  if ((OrCI & Known.Zero) != OrCI)
    return SDValue();

  // OK, we can do the combine.
  SDValue V = Y;
  SDLoc dl(X);
  EVT VT = X.getValueType();
  unsigned BitInX = AndC->logBase2();

  if (BitInX != 0) {
    // We must shift X first.
    X = DAG.getNode(ISD::SRL, dl, VT, X,
                    DAG.getConstant(BitInX, dl, VT));
  }

  // Insert one BFI per set bit of CM: each copies bit 0 of the (shifted) X
  // into bit position BitInY of the accumulated value.
  for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
       BitInY < NumActiveBits; ++BitInY) {
    if (OrCI[BitInY] == 0)
      continue;
    APInt Mask(VT.getSizeInBits(), 0);
    Mask.setBit(BitInY);
    V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
                    // Confusingly, the operand is an *inverted* mask.
                    DAG.getConstant(~Mask, dl, VT));
  }

  return V;
}
12986 
/// PerformHWLoopCombine - Recognise a BRCOND whose condition is derived from
/// the llvm.test.set.loop.iterations intrinsic and lower the pair into an
/// ARMISD::WLS (while-loop-start) node for hardware-loop generation.
static SDValue PerformHWLoopCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const ARMSubtarget *ST) {
  // Look for (brcond (xor test.set.loop.iterations, -1)
  SDValue CC = N->getOperand(1);
  unsigned Opc = CC->getOpcode();
  SDValue Int;

  // The condition may be the intrinsic itself, or the intrinsic compared
  // against 1 via XOR or SETCC.
  if ((Opc == ISD::XOR || Opc == ISD::SETCC) &&
      (CC->getOperand(0)->getOpcode() == ISD::INTRINSIC_W_CHAIN)) {

    assert((isa<ConstantSDNode>(CC->getOperand(1)) &&
            cast<ConstantSDNode>(CC->getOperand(1))->isOne()) &&
            "Expected to compare against 1");

    Int = CC->getOperand(0);
  } else if (CC->getOpcode() == ISD::INTRINSIC_W_CHAIN)
    Int = CC;
  else
    return SDValue();

  // Operand 1 of INTRINSIC_W_CHAIN is the intrinsic ID.
  unsigned IntOp = cast<ConstantSDNode>(Int.getOperand(1))->getZExtValue();
  if (IntOp != Intrinsic::test_set_loop_iterations)
    return SDValue();

  SDLoc dl(Int);
  SDValue Chain = N->getOperand(0);
  SDValue Elements = Int.getOperand(2);
  SDValue ExitBlock = N->getOperand(2);

  // TODO: Once we start supporting tail predication, we can add another
  // operand to WLS for the number of elements processed in a vector loop.

  SDValue Ops[] = { Chain, Elements, ExitBlock };
  SDValue Res = DCI.DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
  // The intrinsic's chain result is short-circuited to its incoming chain;
  // the WLS node now carries the control dependence instead.
  DCI.DAG.ReplaceAllUsesOfValueWith(Int.getValue(1), Int.getOperand(0));
  return Res;
}
13025 
/// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
SDValue
ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
  // Operand 4 is the flag-producing compare feeding this conditional branch.
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue Chain = N->getOperand(0);
  SDValue BB = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
  // -> (brcond Chain BB CC CPSR Cmp)
  // i.e. a branch on a materialized i1 condition can branch on the original
  // condition directly, provided the intermediate nodes have no other users.
  if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
      LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
      LHS->getOperand(0)->hasOneUse()) {
    auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
    auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if ((LHS00C && LHS00C->getZExtValue() == 0) &&
        (LHS01C && LHS01C->getZExtValue() == 1) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(
          ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
          LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
    }
  }

  return SDValue();
}
13065 
/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
SDValue
ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
  // Operand 4 is the flag-producing compare feeding this conditional move.
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at EQ and NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue FalseVal = N->getOperand(0);
  SDValue TrueVal = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // BFI is only available on V6T2+.
  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
    SDValue R = PerformCMOVToBFICombine(N, DAG);
    if (R)
      return R;
  }

  // Simplify
  //   mov     r1, r0
  //   cmp     r1, x
  //   mov     r0, y
  //   moveq   r0, x
  // to
  //   cmp     r0, x
  //   movne   r0, y
  //
  //   mov     r1, r0
  //   cmp     r1, x
  //   mov     r0, x
  //   movne   r0, y
  // to
  //   cmp     r0, x
  //   movne   r0, y
  /// FIXME: Turn this into a target neutral optimization?
  SDValue Res;
  if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
                      N->getOperand(3), Cmp);
  } else if (CC == ARMCC::EQ && TrueVal == RHS) {
    SDValue ARMcc;
    SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
                      N->getOperand(3), NewCmp);
  }

  // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
  // -> (cmov F T CC CPSR Cmp)
  if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
    auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if ((LHS0C && LHS0C->getZExtValue() == 0) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                         LHS->getOperand(2), LHS->getOperand(3),
                         LHS->getOperand(4));
    }
  }

  if (!VT.isInteger())
      return SDValue();

  // Materialize a boolean comparison for integers so we can avoid branching.
  if (isNullConstant(FalseVal)) {
    if (CC == ARMCC::EQ && isOneConstant(TrueVal)) {
      if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) {
        // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it
        // right 5 bits will make that 32 be 1, otherwise it will be 0.
        // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5
        SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
        Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub),
                          DAG.getConstant(5, dl, MVT::i32));
      } else {
        // CMOV 0, 1, ==, (CMPZ x, y) ->
        //     (ADDCARRY (SUB x, y), t:0, t:1)
        // where t = (SUBCARRY 0, (SUB x, y), 0)
        //
        // The SUBCARRY computes 0 - (x - y) and this will give a borrow when
        // x != y. In other words, a carry C == 1 when x == y, C == 0
        // otherwise.
        // The final ADDCARRY computes
        //     x - y + (0 - (x - y)) + C == C
        SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
        SDVTList VTs = DAG.getVTList(VT, MVT::i32);
        SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub);
        // ISD::SUBCARRY returns a borrow but we want the carry here
        // actually.
        SDValue Carry =
            DAG.getNode(ISD::SUB, dl, MVT::i32,
                        DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1));
        Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry);
      }
    } else if (CC == ARMCC::NE && !isNullConstant(RHS) &&
               (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) {
      // This seems pointless but will allow us to combine it further below.
      // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
      SDValue Sub =
          DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
      // Glue the flags produced by SUBS into the CMOV via CPSR.
      SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                          Sub.getValue(1), SDValue());
      Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc,
                        N->getOperand(3), CPSRGlue.getValue(1));
      FalseVal = Sub;
    }
  } else if (isNullConstant(TrueVal)) {
    if (CC == ARMCC::EQ && !isNullConstant(RHS) &&
        (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) {
      // This seems pointless but will allow us to combine it further below
      // Note that we change == for != as this is the dual for the case above.
      // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
      SDValue Sub =
          DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
      // Glue the flags produced by SUBS into the CMOV via CPSR.
      SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                          Sub.getValue(1), SDValue());
      Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal,
                        DAG.getConstant(ARMCC::NE, dl, MVT::i32),
                        N->getOperand(3), CPSRGlue.getValue(1));
      FalseVal = Sub;
    }
  }

  // On Thumb1, the DAG above may be further combined if z is a power of 2
  // (z == 2 ^ K).
  // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 ->
  // t1 = (USUBO (SUB x, y), 1)
  // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1)
  // Result = if K != 0 then (SHL t2:0, K) else t2:0
  //
  // This also handles the special case of comparing against zero; it's
  // essentially, the same pattern, except there's no SUBS:
  // CMOV x, z, !=, (CMPZ x, 0) ->
  // t1 = (USUBO x, 1)
  // t2 = (SUBCARRY x, t1:0, t1:1)
  // Result = if K != 0 then (SHL t2:0, K) else t2:0
  const APInt *TrueConst;
  if (Subtarget->isThumb1Only() && CC == ARMCC::NE &&
      ((FalseVal.getOpcode() == ARMISD::SUBS &&
        FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) ||
       (FalseVal == LHS && isNullConstant(RHS))) &&
      (TrueConst = isPowerOf2Constant(TrueVal))) {
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    unsigned ShiftAmount = TrueConst->logBase2();
    if (ShiftAmount)
      // Compute with z == 1 and shift the result into place afterwards.
      TrueVal = DAG.getConstant(1, dl, VT);
    SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal);
    Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1));

    if (ShiftAmount)
      Res = DAG.getNode(ISD::SHL, dl, VT, Res,
                        DAG.getConstant(ShiftAmount, dl, MVT::i32));
  }

  if (Res.getNode()) {
    KnownBits Known = DAG.computeKnownBits(SDValue(N,0));
    // Capture demanded bits information that would be otherwise lost.
    if (Known.Zero == 0xfffffffe)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i1));
    else if (Known.Zero == 0xffffff00)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i8));
    else if (Known.Zero == 0xffff0000)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i16));
  }

  return Res;
}
13243 
/// PerformDAGCombine - Central dispatch for ARM target-specific DAG combines,
/// keyed on the node's opcode. Returns an empty SDValue when no combine
/// applies, letting the generic combiner continue.
SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ISD::ABS:        return PerformABSCombine(N, DCI, Subtarget);
  case ARMISD::ADDE:    return PerformADDECombine(N, DCI, Subtarget);
  case ARMISD::UMLAL:   return PerformUMLALCombine(N, DCI.DAG, Subtarget);
  case ISD::ADD:        return PerformADDCombine(N, DCI, Subtarget);
  case ISD::SUB:        return PerformSUBCombine(N, DCI);
  case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
  case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
  case ISD::XOR:        return PerformXORCombine(N, DCI, Subtarget);
  case ISD::AND:        return PerformANDCombine(N, DCI, Subtarget);
  case ISD::BRCOND:     return PerformHWLoopCombine(N, DCI, Subtarget);
  case ARMISD::ADDC:
  case ARMISD::SUBC:    return PerformAddcSubcCombine(N, DCI, Subtarget);
  case ARMISD::SUBE:    return PerformAddeSubeCombine(N, DCI, Subtarget);
  case ARMISD::BFI:     return PerformBFICombine(N, DCI);
  case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
  case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
  case ISD::STORE:      return PerformSTORECombine(N, DCI);
  case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
  case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
  case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
  case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
  case ARMISD::VDUP: return PerformVDUPCombine(N, DCI, Subtarget);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return PerformVCVTCombine(N, DCI.DAG, Subtarget);
  case ISD::FDIV:
    return PerformVDIVCombine(N, DCI.DAG, Subtarget);
  case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return PerformShiftCombine(N, DCI, Subtarget);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
  case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
  case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
  case ISD::LOAD:       return PerformLOADCombine(N, DCI);
  case ARMISD::VLD1DUP:
  case ARMISD::VLD2DUP:
  case ARMISD::VLD3DUP:
  case ARMISD::VLD4DUP:
    return PerformVLDCombine(N, DCI);
  case ARMISD::BUILD_VECTOR:
    return PerformARMBUILD_VECTORCombine(N, DCI);
  // The SMULW/SMLAL family only read one 16-bit half of some operands;
  // narrow the demanded bits so feeding nodes can be simplified.
  case ARMISD::SMULWB: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
    if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
      return SDValue();
    break;
  }
  case ARMISD::SMULWT: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
    if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
      return SDValue();
    break;
  }
  case ARMISD::SMLALBB: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALBT: {
    unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits();
    APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
    unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits();
    APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALTB: {
    unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits();
    APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
    unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits();
    APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALTT: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    // NEON load/store intrinsics may be combined with post-increment
    // addressing; defer to the common VLD/VST handler.
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::arm_neon_vld1:
    case Intrinsic::arm_neon_vld1x2:
    case Intrinsic::arm_neon_vld1x3:
    case Intrinsic::arm_neon_vld1x4:
    case Intrinsic::arm_neon_vld2:
    case Intrinsic::arm_neon_vld3:
    case Intrinsic::arm_neon_vld4:
    case Intrinsic::arm_neon_vld2lane:
    case Intrinsic::arm_neon_vld3lane:
    case Intrinsic::arm_neon_vld4lane:
    case Intrinsic::arm_neon_vld2dup:
    case Intrinsic::arm_neon_vld3dup:
    case Intrinsic::arm_neon_vld4dup:
    case Intrinsic::arm_neon_vst1:
    case Intrinsic::arm_neon_vst1x2:
    case Intrinsic::arm_neon_vst1x3:
    case Intrinsic::arm_neon_vst1x4:
    case Intrinsic::arm_neon_vst2:
    case Intrinsic::arm_neon_vst3:
    case Intrinsic::arm_neon_vst4:
    case Intrinsic::arm_neon_vst2lane:
    case Intrinsic::arm_neon_vst3lane:
    case Intrinsic::arm_neon_vst4lane:
      return PerformVLDCombine(N, DCI);
    default: break;
    }
    break;
  }
  return SDValue();
}
13376 
13377 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
13378                                                           EVT VT) const {
13379   return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
13380 }
13381 
/// Return true if unaligned memory accesses of the given type are legal on
/// this subtarget; when they are, *Fast (if non-null) is additionally set to
/// whether they are also fast.
bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
                                                       unsigned Alignment,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  // Depends what it gets converted into if the type is weird.
  if (!VT.isSimple())
    return false;

  // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus
  bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
  auto Ty = VT.getSimpleVT().SimpleTy;

  if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) {
    // Unaligned access can use (for example) LDRB, LDRH, LDR
    if (AllowsUnaligned) {
      if (Fast)
        // Unaligned accesses are only fast from v7 onwards.
        *Fast = Subtarget->hasV7Ops();
      return true;
    }
  }

  if (Ty == MVT::f64 || Ty == MVT::v2f64) {
    // For any little-endian targets with neon, we can support unaligned ld/st
    // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
    // A big-endian target may also explicitly support unaligned accesses
    if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
      if (Fast)
        *Fast = true;
      return true;
    }
  }

  // Everything below this point concerns MVE vector types only.
  if (!Subtarget->hasMVEIntegerOps())
    return false;
  if (Ty != MVT::v16i8 && Ty != MVT::v8i16 && Ty != MVT::v8f16 &&
      Ty != MVT::v4i32 && Ty != MVT::v4f32 && Ty != MVT::v2i64 &&
      Ty != MVT::v2f64 &&
      // These are for truncated stores
      Ty != MVT::v4i8 && Ty != MVT::v8i8 && Ty != MVT::v4i16)
    return false;

  if (Subtarget->isLittle()) {
    // In little-endian MVE, the store instructions VSTRB.U8,
    // VSTRH.U16 and VSTRW.U32 all store the vector register in
    // exactly the same format, and differ only in the range of
    // their immediate offset field and the required alignment.
    //
    // In particular, VSTRB.U8 can store a vector at byte alignment.
    // So at this stage we can simply say that loads/stores of all
    // 128-bit wide vector types are permitted at any alignment,
    // because we know at least _one_ instruction can manage that.
    //
    // Later on we might find that some of those loads are better
    // generated as VLDRW.U32 if alignment permits, to take
    // advantage of the larger immediate range. But for the moment,
    // all that matters is that if we don't lower the load then
    // _some_ instruction can handle it.
    if (Fast)
      *Fast = true;
    return true;
  } else {
    // In big-endian MVE, those instructions aren't so similar
    // after all, because they reorder the bytes of the vector
    // differently. So this time we can only store a particular
    // kind of vector if its alignment is at least the element
    // type. And we can't store vectors of i64 or f64 at all
    // without having to do some postprocessing, because there's
    // no VSTRD.U64.
    if (Ty == MVT::v16i8 ||
        ((Ty == MVT::v8i16 || Ty == MVT::v8f16) && Alignment >= 2) ||
        ((Ty == MVT::v4i32 || Ty == MVT::v4f32) && Alignment >= 4)) {
      if (Fast)
        *Fast = true;
      return true;
    }
  }

  return false;
}
13461 
// Return true when both the destination and source alignments are either
// unknown (0) or an exact multiple of AlignCheck.
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
                       unsigned AlignCheck) {
  auto AlignOk = [AlignCheck](unsigned Align) {
    return Align == 0 || Align % AlignCheck == 0;
  };
  return AlignOk(SrcAlign) && AlignOk(DstAlign);
}
13467 
/// Choose a wide type for lowering memcpy/memset: prefer a 128-bit (v2f64)
/// or 64-bit (f64) NEON access when the operation is large enough and either
/// the alignment permits it or misaligned accesses are fast. Returns
/// MVT::Other to defer to the target-independent logic.
EVT ARMTargetLowering::getOptimalMemOpType(
    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
    bool ZeroMemset, bool MemcpyStrSrc,
    const AttributeList &FuncAttributes) const {
  // See if we can use NEON instructions for this...
  // (Non-zero memsets can't: splatting the byte into a vector register isn't
  // modelled here, and NoImplicitFloat forbids vector registers entirely.)
  if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
      !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
    bool Fast;
    if (Size >= 16 &&
        (memOpAlign(SrcAlign, DstAlign, 16) ||
         (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
                                         MachineMemOperand::MONone, &Fast) &&
          Fast))) {
      return MVT::v2f64;
    } else if (Size >= 8 &&
               (memOpAlign(SrcAlign, DstAlign, 8) ||
                (allowsMisalignedMemoryAccesses(
                     MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
                 Fast))) {
      return MVT::f64;
    }
  }

  // Let the target-independent logic figure it out.
  return MVT::Other;
}
13494 
13495 // 64-bit integers are split into their high and low parts and held in two
13496 // different registers, so the trunc is free since the low register can just
13497 // be used.
13498 bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
13499   if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
13500     return false;
13501   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
13502   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
13503   return (SrcBits == 64 && DestBits == 32);
13504 }
13505 
13506 bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
13507   if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() ||
13508       !DstVT.isInteger())
13509     return false;
13510   unsigned SrcBits = SrcVT.getSizeInBits();
13511   unsigned DestBits = DstVT.getSizeInBits();
13512   return (SrcBits == 64 && DestBits == 32);
13513 }
13514 
13515 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
13516   if (Val.getOpcode() != ISD::LOAD)
13517     return false;
13518 
13519   EVT VT1 = Val.getValueType();
13520   if (!VT1.isSimple() || !VT1.isInteger() ||
13521       !VT2.isSimple() || !VT2.isInteger())
13522     return false;
13523 
13524   switch (VT1.getSimpleVT().SimpleTy) {
13525   default: break;
13526   case MVT::i1:
13527   case MVT::i8:
13528   case MVT::i16:
13529     // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
13530     return true;
13531   }
13532 
13533   return false;
13534 }
13535 
13536 bool ARMTargetLowering::isFNegFree(EVT VT) const {
13537   if (!VT.isSimple())
13538     return false;
13539 
13540   // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that
13541   // negate values directly (fneg is free). So, we don't want to let the DAG
13542   // combiner rewrite fneg into xors and some other instructions.  For f16 and
13543   // FullFP16 argument passing, some bitcast nodes may be introduced,
13544   // triggering this DAG combine rewrite, so we are avoiding that with this.
13545   switch (VT.getSimpleVT().SimpleTy) {
13546   default: break;
13547   case MVT::f16:
13548     return Subtarget->hasFullFP16();
13549   }
13550 
13551   return false;
13552 }
13553 
13554 /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
13555 /// of the vector elements.
13556 static bool areExtractExts(Value *Ext1, Value *Ext2) {
13557   auto areExtDoubled = [](Instruction *Ext) {
13558     return Ext->getType()->getScalarSizeInBits() ==
13559            2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
13560   };
13561 
13562   if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
13563       !match(Ext2, m_ZExtOrSExt(m_Value())) ||
13564       !areExtDoubled(cast<Instruction>(Ext1)) ||
13565       !areExtDoubled(cast<Instruction>(Ext2)))
13566     return false;
13567 
13568   return true;
13569 }
13570 
13571 /// Check if sinking \p I's operands to I's basic block is profitable, because
13572 /// the operands can be folded into a target instruction, e.g.
13573 /// sext/zext can be folded into vsubl.
13574 bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
13575                                            SmallVectorImpl<Use *> &Ops) const {
13576   if (!Subtarget->hasNEON() || !I->getType()->isVectorTy())
13577     return false;
13578 
13579   switch (I->getOpcode()) {
13580   case Instruction::Sub:
13581   case Instruction::Add: {
13582     if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
13583       return false;
13584     Ops.push_back(&I->getOperandUse(0));
13585     Ops.push_back(&I->getOperandUse(1));
13586     return true;
13587   }
13588   default:
13589     return false;
13590   }
13591   return false;
13592 }
13593 
13594 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
13595   EVT VT = ExtVal.getValueType();
13596 
13597   if (!isTypeLegal(VT))
13598     return false;
13599 
13600   // Don't create a loadext if we can fold the extension into a wide/long
13601   // instruction.
13602   // If there's more than one user instruction, the loadext is desirable no
13603   // matter what.  There can be two uses by the same instruction.
13604   if (ExtVal->use_empty() ||
13605       !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
13606     return true;
13607 
13608   SDNode *U = *ExtVal->use_begin();
13609   if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
13610        U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
13611     return false;
13612 
13613   return true;
13614 }
13615 
13616 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
13617   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
13618     return false;
13619 
13620   if (!isTypeLegal(EVT::getEVT(Ty1)))
13621     return false;
13622 
13623   assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
13624 
13625   // Assuming the caller doesn't have a zeroext or signext return parameter,
13626   // truncation all the way down to i1 is valid.
13627   return true;
13628 }
13629 
13630 int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
13631                                                 const AddrMode &AM, Type *Ty,
13632                                                 unsigned AS) const {
13633   if (isLegalAddressingMode(DL, AM, Ty, AS)) {
13634     if (Subtarget->hasFPAO())
13635       return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
13636     return 0;
13637   }
13638   return -1;
13639 }
13640 
13641 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
13642   if (V < 0)
13643     return false;
13644 
13645   unsigned Scale = 1;
13646   switch (VT.getSimpleVT().SimpleTy) {
13647   case MVT::i1:
13648   case MVT::i8:
13649     // Scale == 1;
13650     break;
13651   case MVT::i16:
13652     // Scale == 2;
13653     Scale = 2;
13654     break;
13655   default:
13656     // On thumb1 we load most things (i32, i64, floats, etc) with a LDR
13657     // Scale == 4;
13658     Scale = 4;
13659     break;
13660   }
13661 
13662   if ((V & (Scale - 1)) != 0)
13663     return false;
13664   return isUInt<5>(V / Scale);
13665 }
13666 
/// Return true if immediate offset V can be encoded by a Thumb2 (or
/// MVE/VFP) load/store addressing mode for a value of type VT.
static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
                                      const ARMSubtarget *Subtarget) {
  if (!VT.isInteger() && !VT.isFloatingPoint())
    return false;
  // NEON vector loads/stores take no immediate offset.
  if (VT.isVector() && Subtarget->hasNEON())
    return false;
  // FP vectors need the MVE float extension, not just MVE integer ops.
  if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() &&
      !Subtarget->hasMVEFloatOps())
    return false;

  // Work with the magnitude; the sign matters for the imm12/imm8 split below.
  bool IsNeg = false;
  if (V < 0) {
    IsNeg = true;
    V = -V;
  }

  // Access size in bytes (at least 1, e.g. for i1).
  unsigned NumBytes = std::max(VT.getSizeInBits() / 8, 1U);

  // MVE: size * imm7
  if (VT.isVector() && Subtarget->hasMVEIntegerOps()) {
    switch (VT.getSimpleVT().getVectorElementType().SimpleTy) {
    case MVT::i32:
    case MVT::f32:
      return isShiftedUInt<7,2>(V);
    case MVT::i16:
    case MVT::f16:
      return isShiftedUInt<7,1>(V);
    case MVT::i8:
      return isUInt<7>(V);
    default:
      return false;
    }
  }

  // half VLDR: 2 * imm8
  if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16())
    return isShiftedUInt<8, 1>(V);
  // VLDR and LDRD: 4 * imm8
  if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8)
    return isShiftedUInt<8, 2>(V);

  if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) {
    // + imm12 or - imm8
    if (IsNeg)
      return isUInt<8>(V);
    return isUInt<12>(V);
  }

  return false;
}
13717 
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
static bool isLegalAddressImmediate(int64_t V, EVT VT,
                                    const ARMSubtarget *Subtarget) {
  // A zero offset is trivially legal in every mode.
  if (V == 0)
    return true;

  if (!VT.isSimple())
    return false;

  // Dispatch to the ISA-specific helpers for Thumb1 / Thumb2.
  if (Subtarget->isThumb1Only())
    return isLegalT1AddressImmediate(V, VT);
  else if (Subtarget->isThumb2())
    return isLegalT2AddressImmediate(V, VT, Subtarget);

  // ARM mode.
  // ARM-mode offsets are signed; check the magnitude against the encoding.
  if (V < 0)
    V = - V;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i32:
    // +- imm12
    return isUInt<12>(V);
  case MVT::i16:
    // +- imm8
    return isUInt<8>(V);
  case MVT::f32:
  case MVT::f64:
    if (!Subtarget->hasVFP2Base()) // FIXME: NEON?
      return false;
    // VLDR/VSTR: +- (imm8 << 2).
    return isShiftedUInt<8, 2>(V);
  }
}
13754 
13755 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
13756                                                       EVT VT) const {
13757   int Scale = AM.Scale;
13758   if (Scale < 0)
13759     return false;
13760 
13761   switch (VT.getSimpleVT().SimpleTy) {
13762   default: return false;
13763   case MVT::i1:
13764   case MVT::i8:
13765   case MVT::i16:
13766   case MVT::i32:
13767     if (Scale == 1)
13768       return true;
13769     // r + r << imm
13770     Scale = Scale & ~1;
13771     return Scale == 2 || Scale == 4 || Scale == 8;
13772   case MVT::i64:
13773     // FIXME: What are we trying to model here? ldrd doesn't have an r + r
13774     // version in Thumb mode.
13775     // r + r
13776     if (Scale == 1)
13777       return true;
13778     // r * 2 (this can be lowered to r + r).
13779     if (!AM.HasBaseReg && Scale == 2)
13780       return true;
13781     return false;
13782   case MVT::isVoid:
13783     // Note, we allow "void" uses (basically, uses that aren't loads or
13784     // stores), because arm allows folding a scale into many arithmetic
13785     // operations.  This should be made more precise and revisited later.
13786 
13787     // Allow r << imm, but the imm has to be a multiple of two.
13788     if (Scale & 1) return false;
13789     return isPowerOf2_32(Scale);
13790   }
13791 }
13792 
13793 bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
13794                                                       EVT VT) const {
13795   const int Scale = AM.Scale;
13796 
13797   // Negative scales are not supported in Thumb1.
13798   if (Scale < 0)
13799     return false;
13800 
13801   // Thumb1 addressing modes do not support register scaling excepting the
13802   // following cases:
13803   // 1. Scale == 1 means no scaling.
13804   // 2. Scale == 2 this can be lowered to r + r if there is no base register.
13805   return (Scale == 1) || (!AM.HasBaseReg && Scale == 2);
13806 }
13807 
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS, Instruction *I) const {
  EVT VT = getValueType(DL, Ty, true);
  // The immediate offset must be encodable by itself.
  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
    return false;

  // Can never fold addr of global into load/store.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0:  // no scale reg, must be "r+i" or "r", or "i".
    break;
  default:
    // ARM doesn't support any R+R*scale+imm addr modes.
    if (AM.BaseOffs)
      return false;

    if (!VT.isSimple())
      return false;

    // Scaled-register legality is ISA specific; dispatch for Thumb1/Thumb2.
    if (Subtarget->isThumb1Only())
      return isLegalT1ScaledAddressingMode(AM, VT);

    if (Subtarget->isThumb2())
      return isLegalT2ScaledAddressingMode(AM, VT);

    // ARM mode.
    int Scale = AM.Scale;
    switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
      if (Scale < 0) Scale = -Scale;
      if (Scale == 1)
        return true;
      // r + r << imm
      return isPowerOf2_32(Scale & ~1);
    case MVT::i16:
    case MVT::i64:
      // r +/- r
      if (Scale == 1 || (AM.HasBaseReg && Scale == -1))
        return true;
      // r * 2 (this can be lowered to r + r).
      if (!AM.HasBaseReg && Scale == 2)
        return true;
      return false;

    case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because arm allows folding a scale into many arithmetic
      // operations.  This should be made more precise and revisited later.

      // Allow r << imm, but the imm has to be a multiple of two.
      if (Scale & 1) return false;
      return isPowerOf2_32(Scale);
    }
  }
  return true;
}
13871 
13872 /// isLegalICmpImmediate - Return true if the specified immediate is legal
13873 /// icmp immediate, that is the target has icmp instructions which can compare
13874 /// a register against the immediate without having to materialize the
13875 /// immediate into a register.
13876 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
13877   // Thumb2 and ARM modes can use cmn for negative immediates.
13878   if (!Subtarget->isThumb())
13879     return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 ||
13880            ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1;
13881   if (Subtarget->isThumb2())
13882     return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 ||
13883            ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1;
13884   // Thumb1 doesn't have cmn, and only 8-bit immediates.
13885   return Imm >= 0 && Imm <= 255;
13886 }
13887 
13888 /// isLegalAddImmediate - Return true if the specified immediate is a legal add
13889 /// *or sub* immediate, that is the target has add or sub instructions which can
13890 /// add a register with the immediate without having to materialize the
13891 /// immediate into a register.
13892 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
13893   // Same encoding for add/sub, just flip the sign.
13894   int64_t AbsImm = std::abs(Imm);
13895   if (!Subtarget->isThumb())
13896     return ARM_AM::getSOImmVal(AbsImm) != -1;
13897   if (Subtarget->isThumb2())
13898     return ARM_AM::getT2SOImmVal(AbsImm) != -1;
13899   // Thumb1 only has 8-bit unsigned immediate.
13900   return AbsImm >= 0 && AbsImm <= 255;
13901 }
13902 
/// Decompose pointer \p Ptr (an ADD or SUB computing the address) into base
/// and offset parts usable by an ARM-mode pre/post-indexed load/store of
/// memory type \p VT. \p isInc is set to true for increment (ADD) forms.
/// Returns true if a legal decomposition was found.
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  // i16, and sign-extending i8/i1 loads, use the LDRH/LDRSB family.
  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      // AM3 has an 8-bit immediate. A negative constant is assumed (see the
      // assert) to only appear under an ADD, i.e. it encodes a decrement.
      if (RHSC < 0 && RHSC > -256) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        return true;
      }
    }
    // Otherwise use the register-offset form directly.
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      // AM2 has a 12-bit immediate; negative constants encode a decrement.
      if (RHSC < 0 && RHSC > -0x1000) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      // If either operand is a shift, put it in the offset slot so AM2 can
      // fold the shift into the addressing mode.
      ARM_AM::ShiftOpc ShOpcVal=
        ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    // SUB: register-offset decrement form.
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
  return false;
}
13961 
/// Decompose pointer \p Ptr (an ADD or SUB of the base) into base and
/// immediate offset parts usable by a Thumb2 pre/post-indexed load/store.
/// Thumb2 indexed forms only take an 8-bit immediate offset; \p isInc is set
/// to true for the incrementing form. (\p isSEXTLoad is unused here.)
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      // A negative constant is assumed (see assert) to appear only under an
      // ADD, i.e. it encodes a decrement by its magnitude.
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    }
  }

  // Register offsets are not supported by the Thumb2 indexed forms.
  return false;
}
13986 
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const {
  // Thumb1 has no pre-indexed load/store forms.
  if (Subtarget->isThumb1Only())
    return false;

  // Extract the pointer, memory type and extension kind from the load/store.
  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT  = LD->getMemoryVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
  } else
    return false;

  // Let the ISA-specific helper split the address into base + offset.
  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                       Offset, isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                        Offset, isInc, DAG);
  if (!isLegal)
    return false;

  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
  return true;
}
14025 
/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  // Extract pointer, memory type and extension kind from the load/store N.
  // Op is the add/sub that would become the post-index update.
  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false, isNonExt;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT  = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
    isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT  = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
    isNonExt = !ST->isTruncatingStore();
  } else
    return false;

  if (Subtarget->isThumb1Only()) {
    // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
    // must be non-extending/truncating, i32, with an offset of 4.
    assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
    if (Op->getOpcode() != ISD::ADD || !isNonExt)
      return false;
    auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
    if (!RHS || RHS->getZExtValue() != 4)
      return false;

    Offset = Op->getOperand(1);
    Base = Op->getOperand(0);
    AM = ISD::POST_INC;
    return true;
  }

  // Thumb2 / ARM: defer to the ISA-specific decomposition of Op.
  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                       isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  if (!isLegal)
    return false;

  if (Ptr != Base) {
    // Swap base ptr and offset to catch more post-index load / store when
    // it's legal. In Thumb2 mode, offset must be an immediate.
    if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
        !Subtarget->isThumb2())
      std::swap(Base, Offset);

    // Post-indexed load / store update the base pointer.
    if (Ptr != Base)
      return false;
  }

  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
  return true;
}
14091 
/// Compute known zero/one bits for ARM-specific DAG nodes so generic
/// SelectionDAG analyses can see through them.
void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  unsigned BitWidth = Known.getBitWidth();
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::ADDC:
  case ARMISD::ADDE:
  case ARMISD::SUBC:
  case ARMISD::SUBE:
    // Special cases when we convert a carry to a boolean.
    if (Op.getResNo() == 0) {
      SDValue LHS = Op.getOperand(0);
      SDValue RHS = Op.getOperand(1);
      // (ADDE 0, 0, C) will give us a single bit.
      if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) &&
          isNullConstant(RHS)) {
        Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
        return;
      }
    }
    break;
  case ARMISD::CMOV: {
    // Bits are known zero/one if known on the LHS and RHS.
    Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
    if (Known.isUnknown())
      return;

    // Intersect with the other arm: a bit is only known if both agree.
    KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
    Known.Zero &= KnownRHS.Zero;
    Known.One  &= KnownRHS.One;
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    // Operand 1 of INTRINSIC_W_CHAIN holds the intrinsic ID.
    ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
    Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
    switch (IntID) {
    default: return;
    case Intrinsic::arm_ldaex:
    case Intrinsic::arm_ldrex: {
      // ldrex/ldaex of a narrow type zero-extends, so the bits above the
      // memory width are known zero.
      EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
      return;
    }
    }
    // Note: every path in the inner switch returns, so control cannot fall
    // through into the BFI case below.
  }
  case ARMISD::BFI: {
    // Conservatively, we can recurse down the first operand
    // and just mask out all affected bits.
    Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // The operand to BFI is already a mask suitable for removing the bits it
    // sets.
    ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
    const APInt &Mask = CI->getAPIntValue();
    Known.Zero &= Mask;
    Known.One &= Mask;
    return;
  }
  case ARMISD::VGETLANEs:
  case ARMISD::VGETLANEu: {
    // Compute known bits of the demanded source lane, then extend to the
    // (wider) scalar result type.
    const SDValue &SrcSV = Op.getOperand(0);
    EVT VecVT = SrcSV.getValueType();
    assert(VecVT.isVector() && "VGETLANE expected a vector type");
    const unsigned NumSrcElts = VecVT.getVectorNumElements();
    ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode());
    assert(Pos->getAPIntValue().ult(NumSrcElts) &&
           "VGETLANE index out of bounds");
    unsigned Idx = Pos->getZExtValue();
    APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
    Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1);

    EVT VT = Op.getValueType();
    const unsigned DstSz = VT.getScalarSizeInBits();
    const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits();
    (void)SrcSz;
    assert(SrcSz == Known.getBitWidth());
    assert(DstSz > SrcSz);
    // Signed gets use sext; unsigned gets zero the extended bits.
    if (Op.getOpcode() == ARMISD::VGETLANEs)
      Known = Known.sext(DstSz);
    else {
      Known = Known.zext(DstSz, true /* extended bits are known zero */);
    }
    assert(DstSz == Known.getBitWidth());
    break;
  }
  }
}
14184 
/// Try to replace the constant in an AND with one that is cheaper to
/// materialize on ARM/Thumb, given which bits are actually demanded.
/// Returns true if a replacement was committed via TLO.CombineTo.
bool
ARMTargetLowering::targetShrinkDemandedConstant(SDValue Op,
                                                const APInt &DemandedAPInt,
                                                TargetLoweringOpt &TLO) const {
  // Delay optimization, so we don't have to deal with illegal types, or block
  // optimizations.
  if (!TLO.LegalOps)
    return false;

  // Only optimize AND for now.
  if (Op.getOpcode() != ISD::AND)
    return false;

  EVT VT = Op.getValueType();

  // Ignore vectors.
  if (VT.isVector())
    return false;

  assert(VT == MVT::i32 && "Unexpected integer type");

  // Make sure the RHS really is a constant.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;

  unsigned Mask = C->getZExtValue();

  // Any mask between ShrunkMask (demanded bits only) and ExpandedMask
  // (non-demanded bits forced to one) behaves identically on the demanded
  // bits, so we may pick whichever is cheapest to encode.
  unsigned Demanded = DemandedAPInt.getZExtValue();
  unsigned ShrunkMask = Mask & Demanded;
  unsigned ExpandedMask = Mask | ~Demanded;

  // If the mask is all zeros, let the target-independent code replace the
  // result with zero.
  if (ShrunkMask == 0)
    return false;

  // If the mask is all ones, erase the AND. (Currently, the target-independent
  // code won't do this, so we have to do it explicitly to avoid an infinite
  // loop in obscure cases.)
  if (ExpandedMask == ~0U)
    return TLO.CombineTo(Op, Op.getOperand(0));

  // A candidate mask is legal iff it lies between ShrunkMask and ExpandedMask.
  auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
    return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
  };
  // Rewrite the AND with NewMask (no-op if it already equals the constant).
  auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool {
    if (NewMask == Mask)
      return true;
    SDLoc DL(Op);
    SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
    SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
    return TLO.CombineTo(Op, NewOp);
  };

  // Prefer uxtb mask.
  if (IsLegalMask(0xFF))
    return UseMask(0xFF);

  // Prefer uxth mask.
  if (IsLegalMask(0xFFFF))
    return UseMask(0xFFFF);

  // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
  if (ShrunkMask < 256)
    return UseMask(ShrunkMask);

  // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
  if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
    return UseMask(ExpandedMask);

  // Potential improvements:
  //
  // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
  // We could try to prefer Thumb1 immediates which can be lowered to a
  // two-instruction sequence.
  // We could try to recognize more legal ARM/Thumb2 immediates here.

  return false;
}
14267 
14268 
14269 //===----------------------------------------------------------------------===//
14270 //                           ARM Inline Assembly Support
14271 //===----------------------------------------------------------------------===//
14272 
/// Recognize trivial inline-asm bodies that can be replaced with intrinsics.
/// Currently only matches a lone "rev $0, $1" (with "=l,l" constraints) on an
/// i32 and lowers it to llvm.bswap.
bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
  // Looking for "rev" which is V6+.
  if (!Subtarget->hasV6Ops())
    return false;

  // Split the asm body into statements.
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
  std::string AsmStr = IA->getAsmString();
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    // Single statement: re-tokenize it into opcode and operands.
    AsmStr = AsmPieces[0];
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t,");

    // rev $0, $1
    if (AsmPieces.size() == 3 &&
        AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
        IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
      // Only a 32-bit integer rev is equivalent to bswap.
      IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
      if (Ty && Ty->getBitWidth() == 32)
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  }

  return false;
}
14303 
14304 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
14305   // At this point, we have to lower this constraint to something else, so we
14306   // lower it to an "r" or "w". However, by doing this we will force the result
14307   // to be in register, while the X constraint is much more permissive.
14308   //
14309   // Although we are correct (we are free to emit anything, without
14310   // constraints), we might break use cases that would expect us to be more
14311   // efficient and emit something else.
14312   if (!Subtarget->hasVFP2Base())
14313     return "r";
14314   if (ConstraintVT.isFloatingPoint())
14315     return "w";
14316   if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
14317      (ConstraintVT.getSizeInBits() == 64 ||
14318       ConstraintVT.getSizeInBits() == 128))
14319     return "w";
14320 
14321   return "r";
14322 }
14323 
14324 /// getConstraintType - Given a constraint letter, return the type of
14325 /// constraint it is for this target.
14326 ARMTargetLowering::ConstraintType
14327 ARMTargetLowering::getConstraintType(StringRef Constraint) const {
14328   if (Constraint.size() == 1) {
14329     switch (Constraint[0]) {
14330     default:  break;
14331     case 'l': return C_RegisterClass;
14332     case 'w': return C_RegisterClass;
14333     case 'h': return C_RegisterClass;
14334     case 'x': return C_RegisterClass;
14335     case 't': return C_RegisterClass;
14336     case 'j': return C_Other; // Constant for movw.
14337       // An address with a single base register. Due to the way we
14338       // currently handle addresses it is the same as an 'r' memory constraint.
14339     case 'Q': return C_Memory;
14340     }
14341   } else if (Constraint.size() == 2) {
14342     switch (Constraint[0]) {
14343     default: break;
14344     case 'T': return C_RegisterClass;
14345     // All 'U+' constraints are addresses.
14346     case 'U': return C_Memory;
14347     }
14348   }
14349   return TargetLowering::getConstraintType(Constraint);
14350 }
14351 
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
    // If we don't have a value, we can't do a match,
    // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'l':
    // 'l' only matches integers; it is the preferred (specific) class on
    // Thumb, and just an ordinary register class elsewhere.
    if (type->isIntegerTy()) {
      if (Subtarget->isThumb())
        weight = CW_SpecificReg;
      else
        weight = CW_Register;
    }
    break;
  case 'w':
    // 'w' (VFP/NEON register) is only attractive for floating-point operands.
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;
  }
  return weight;
}
14385 
14386 using RCPair = std::pair<unsigned, const TargetRegisterClass *>;
14387 
/// Map an inline-asm register constraint (plus the operand type VT) to an
/// ARM register class. Returns a (register, register class) pair; a register
/// of 0 means "any register of that class".
RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  switch (Constraint.size()) {
  case 1:
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l': // Low regs or general regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'h': // High regs or no regs.
      // Only meaningful in Thumb mode; otherwise fall through to the
      // target-independent handling below.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::hGPRRegClass);
      break;
    case 'r':
      // Any general-purpose register; Thumb1 restricts this to low regs.
      if (Subtarget->isThumb1Only())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'w':
      // VFP/NEON register sized to the operand: S (f32), D (64-bit),
      // Q (128-bit).
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPRRegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPRRegClass);
      break;
    case 'x':
      // Like 'w' but restricted to the lower-numbered registers (the *_8
      // classes).
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPR_8RegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_8RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_8RegClass);
      break;
    case 't':
      // VFP2-accessible registers; i32 is also allowed in an SPR here.
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32 || VT == MVT::i32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_VFP2RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_VFP2RegClass);
      break;
    }
    break;

  case 2:
    // 'Te' / 'To': even / odd low registers (Thumb register-pair
    // constraints).
    if (Constraint[0] == 'T') {
      switch (Constraint[1]) {
      default:
        break;
      case 'e':
        return RCPair(0U, &ARM::tGPREvenRegClass);
      case 'o':
        return RCPair(0U, &ARM::tGPROddRegClass);
      }
    }
    break;

  default:
    break;
  }

  // Explicit {cc} register reference maps to CPSR.
  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
14461 
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Currently only support length 1 constraints.
  if (Constraint.length() != 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'j':
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    // All of these constraints only accept compile-time integer constants.
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    int64_t CVal64 = C->getSExtValue();
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits.  Check
    // that the value fits in an int.
    if (CVal != CVal64)
      return;

    // In the switch below, 'break' accepts CVal (a target constant is
    // created after the switch) and 'return' rejects the operand.
    switch (ConstraintLetter) {
      case 'j':
        // Constant suitable for movw, must be between 0 and
        // 65535.
        if (Subtarget->hasV6T2Ops())
          if (CVal >= 0 && CVal <= 65535)
            break;
        return;
      case 'I':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between 0 and 255, for ADD
          // immediates.
          if (CVal >= 0 && CVal <= 255)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getT2SOImmVal(CVal) != -1)
            break;
        } else {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getSOImmVal(CVal) != -1)
            break;
        }
        return;

      case 'J':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -255 and -1, for negated ADD
          // immediates. This can be used in GCC with an "n" modifier that
          // prints the negated value, for use with SUB instructions. It is
          // not useful otherwise but is implemented for compatibility.
          if (CVal >= -255 && CVal <= -1)
            break;
        } else {
          // This must be a constant between -4095 and 4095. It is not clear
          // what this constraint is intended for. Implemented for
          // compatibility with GCC.
          if (CVal >= -4095 && CVal <= 4095)
            break;
        }
        return;

      case 'K':
        if (Subtarget->isThumb1Only()) {
          // A 32-bit value where only one byte has a nonzero value. Exclude
          // zero to match GCC. This constraint is used by GCC internally for
          // constants that can be loaded with a move/shift combination.
          // It is not useful otherwise but is implemented for compatibility.
          if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getT2SOImmVal(~CVal) != -1)
            break;
        } else {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getSOImmVal(~CVal) != -1)
            break;
        }
        return;

      case 'L':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -7 and 7,
          // for 3-operand ADD/SUB immediate instructions.
          // NOTE(review): the test below uses 'CVal < 7', which excludes 7
          // even though the comment says the range is inclusive — confirm
          // whether this off-by-one is intentional.
          if (CVal >= -7 && CVal < 7)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
            break;
        } else {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getSOImmVal(-CVal) != -1)
            break;
        }
        return;

      case 'M':
        if (Subtarget->isThumb1Only()) {
          // This must be a multiple of 4 between 0 and 1020, for
          // ADD sp + immediate.
          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
            break;
        } else {
          // A power of two or a constant between 0 and 32.  This is used in
          // GCC for the shift amount on shifted register operands, but it is
          // useful in general for any shift amounts.
          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
            break;
        }
        return;

      case 'N':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a constant between 0 and 31, for shift amounts.
          if (CVal >= 0 && CVal <= 31)
            break;
        }
        return;

      case 'O':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a multiple of 4 between -508 and 508, for
          // ADD/SUB sp = sp + immediate.
          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
            break;
        }
        return;
    }
    // The constant passed its constraint check; materialize it as a target
    // constant of the operand's type.
    Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  // Unhandled constraint letters fall back to the generic implementation.
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
14628 
14629 static RTLIB::Libcall getDivRemLibcall(
14630     const SDNode *N, MVT::SimpleValueType SVT) {
14631   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
14632           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
14633          "Unhandled Opcode in getDivRemLibcall");
14634   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
14635                   N->getOpcode() == ISD::SREM;
14636   RTLIB::Libcall LC;
14637   switch (SVT) {
14638   default: llvm_unreachable("Unexpected request for libcall!");
14639   case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
14640   case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
14641   case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
14642   case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
14643   }
14644   return LC;
14645 }
14646 
14647 static TargetLowering::ArgListTy getDivRemArgList(
14648     const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
14649   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
14650           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
14651          "Unhandled Opcode in getDivRemArgList");
14652   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
14653                   N->getOpcode() == ISD::SREM;
14654   TargetLowering::ArgListTy Args;
14655   TargetLowering::ArgListEntry Entry;
14656   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
14657     EVT ArgVT = N->getOperand(i).getValueType();
14658     Type *ArgTy = ArgVT.getTypeForEVT(*Context);
14659     Entry.Node = N->getOperand(i);
14660     Entry.Ty = ArgTy;
14661     Entry.IsSExt = isSigned;
14662     Entry.IsZExt = !isSigned;
14663     Args.push_back(Entry);
14664   }
14665   if (Subtarget->isTargetWindows() && Args.size() >= 2)
14666     std::swap(Args[0], Args[1]);
14667   return Args;
14668 }
14669 
/// Lower ISD::SDIVREM / ISD::UDIVREM either to a hardware divide followed by
/// multiply-and-subtract, or to a runtime divmod helper that returns both
/// the quotient and the remainder.
SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
          Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
          Subtarget->isTargetWindows()) &&
         "Register-based DivRem lowering only");
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool isSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  SDLoc dl(Op);

  // If the target has hardware divide, use divide + multiply + subtract:
  //     div = a / b
  //     rem = a - b * div
  //     return {div, rem}
  // This should be lowered into UDIV/SDIV + MLS later on.
  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (hasDivide && Op->getValueType(0).isSimple() &&
      Op->getSimpleValueType(0) == MVT::i32) {
    unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
    const SDValue Dividend = Op->getOperand(0);
    const SDValue Divisor = Op->getOperand(1);
    SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
    SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);

    SDValue Values[2] = {Div, Rem};
    return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
  }

  // Otherwise call the divmod runtime helper.
  RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
                                       VT.getSimpleVT().SimpleTy);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
                                                    DAG.getContext(),
                                                    Subtarget);

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  // The helper returns both results, modeled as a {Ty, Ty} struct.
  Type *RetTy = StructType::get(Ty, Ty);

  // On Windows, chain in an explicit divide-by-zero check before the call
  // (WinDBZCheckDenominator is defined elsewhere in this file).
  if (Subtarget->isTargetWindows())
    InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(InChain)
    .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
    .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
}
14727 
14728 // Lowers REM using divmod helpers
14729 // see RTABI section 4.2/4.3
14730 SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
14731   // Build return types (div and rem)
14732   std::vector<Type*> RetTyParams;
14733   Type *RetTyElement;
14734 
14735   switch (N->getValueType(0).getSimpleVT().SimpleTy) {
14736   default: llvm_unreachable("Unexpected request for libcall!");
14737   case MVT::i8:   RetTyElement = Type::getInt8Ty(*DAG.getContext());  break;
14738   case MVT::i16:  RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
14739   case MVT::i32:  RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
14740   case MVT::i64:  RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
14741   }
14742 
14743   RetTyParams.push_back(RetTyElement);
14744   RetTyParams.push_back(RetTyElement);
14745   ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
14746   Type *RetTy = StructType::get(*DAG.getContext(), ret);
14747 
14748   RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
14749                                                              SimpleTy);
14750   SDValue InChain = DAG.getEntryNode();
14751   TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
14752                                                     Subtarget);
14753   bool isSigned = N->getOpcode() == ISD::SREM;
14754   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
14755                                          getPointerTy(DAG.getDataLayout()));
14756 
14757   if (Subtarget->isTargetWindows())
14758     InChain = WinDBZCheckDenominator(DAG, N, InChain);
14759 
14760   // Lower call
14761   CallLoweringInfo CLI(DAG);
14762   CLI.setChain(InChain)
14763      .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
14764      .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
14765   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
14766 
14767   // Return second (rem) result operand (first contains div)
14768   SDNode *ResNode = CallResult.first.getNode();
14769   assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
14770   return ResNode->getOperand(1);
14771 }
14772 
/// Lower ISD::DYNAMIC_STACKALLOC for Windows targets, which require a stack
/// probe (__chkstk) unless the function opts out via "no-stack-arg-probe".
SDValue
ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "unsupported target platform");
  SDLoc DL(Op);

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);

  // With "no-stack-arg-probe", skip the probe and adjust SP directly by the
  // (optionally aligned) allocation size.
  if (DAG.getMachineFunction().getFunction().hasFnAttribute(
          "no-stack-arg-probe")) {
    unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
    SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
    Chain = SP.getValue(1);
    SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
    if (Align)
      // Round the new SP down to the requested alignment (masking with
      // -Align clears the low bits; Align is expected to be a power of two).
      SP = DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
                       DAG.getConstant(-(uint64_t)Align, DL, MVT::i32));
    Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP);
    SDValue Ops[2] = { SP, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // Otherwise go through WIN__CHKSTK: the allocation size is passed in R4
  // in units of 4-byte words (hence the shift right by 2) — presumably per
  // the Windows-on-ARM __chkstk contract; confirm against the ABI docs.
  SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
                              DAG.getConstant(2, DL, MVT::i32));

  // Glue the R4 copy to the chkstk node so nothing clobbers R4 in between.
  SDValue Flag;
  Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);

  // Read SP back after the chkstk call; that is the allocation result.
  SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
  Chain = NewSP.getValue(1);

  SDValue Ops[2] = { NewSP, Chain };
  return DAG.getMergeValues(Ops, DL);
}
14812 
/// Custom-lower FP_EXTEND (f16 -> f32/f64, f32 -> f64) for subtargets that
/// lack the direct conversion instruction, using one or two steps of
/// instruction and/or library call.
SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  SDValue SrcVal = Op.getOperand(0);
  const unsigned DstSz = Op.getValueType().getSizeInBits();
  const unsigned SrcSz = SrcVal.getValueType().getSizeInBits();
  assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 &&
         "Unexpected type for custom-lowering FP_EXTEND");

  assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
         "With both FP DP and 16, any FP conversion is legal!");

  assert(!(DstSz == 32 && Subtarget->hasFP16()) &&
         "With FP16, 16 to 32 conversion is legal!");

  // Either we are converting from 16 -> 64, without FP16 and/or
  // FP.double-precision or without Armv8-fp. So we must do it in two
  // steps.
  // Or we are converting from 32 -> 64 without fp.double-precision or 16 -> 32
  // without FP16. So we must do a function call.
  SDLoc Loc(Op);
  RTLIB::Libcall LC;
  if (SrcSz == 16) {
    // First step: widen f16 to f32, either directly or via libcall.
    // Instruction from 16 -> 32
    if (Subtarget->hasFP16())
      SrcVal = DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f32, SrcVal);
    // Lib call from 16 -> 32
    else {
      LC = RTLIB::getFPEXT(MVT::f16, MVT::f32);
      assert(LC != RTLIB::UNKNOWN_LIBCALL &&
             "Unexpected type for custom-lowering FP_EXTEND");
      SrcVal =
        makeLibCall(DAG, LC, MVT::f32, SrcVal, /*isSigned*/ false, Loc).first;
    }
  }

  if (DstSz != 64)
    return SrcVal;
  // For sure now SrcVal is 32 bits
  // Second step: widen f32 to f64.
  if (Subtarget->hasFP64()) // Instruction from 32 -> 64
    return DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f64, SrcVal);

  LC = RTLIB::getFPEXT(MVT::f32, MVT::f64);
  assert(LC != RTLIB::UNKNOWN_LIBCALL &&
         "Unexpected type for custom-lowering FP_EXTEND");
  return makeLibCall(DAG, LC, MVT::f64, SrcVal, /*isSigned*/ false, Loc).first;
}
14858 
14859 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
14860   SDValue SrcVal = Op.getOperand(0);
14861   EVT SrcVT = SrcVal.getValueType();
14862   EVT DstVT = Op.getValueType();
14863   const unsigned DstSz = Op.getValueType().getSizeInBits();
14864   const unsigned SrcSz = SrcVT.getSizeInBits();
14865   (void)DstSz;
14866   assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 &&
14867          "Unexpected type for custom-lowering FP_ROUND");
14868 
14869   assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
14870          "With both FP DP and 16, any FP conversion is legal!");
14871 
14872   SDLoc Loc(Op);
14873 
14874   // Instruction from 32 -> 16 if hasFP16 is valid
14875   if (SrcSz == 32 && Subtarget->hasFP16())
14876     return Op;
14877 
14878   // Lib call from 32 -> 16 / 64 -> [32, 16]
14879   RTLIB::Libcall LC = RTLIB::getFPROUND(SrcVT, DstVT);
14880   assert(LC != RTLIB::UNKNOWN_LIBCALL &&
14881          "Unexpected type for custom-lowering FP_ROUND");
14882   return makeLibCall(DAG, LC, DstVT, SrcVal, /*isSigned*/ false, Loc).first;
14883 }
14884 
/// Expand a 64-bit ISD::ABS into i32 operations using the identity
/// abs(x) = (x + s) ^ s, where s is the sign mask (x >> 63: all-ones when x
/// is negative, zero otherwise), applied to the {lo, hi} word pair.
void ARMTargetLowering::lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG) const {
  assert(N->getValueType(0) == MVT::i64 && "Unexpected type (!= i64) on ABS.");
  MVT HalfT = MVT::i32;
  SDLoc dl(N);
  SDValue Hi, Lo, Tmp;

  // Bail out (leaving Results empty) if the carry-propagating add nodes we
  // need are not available on i32.
  if (!isOperationLegalOrCustom(ISD::ADDCARRY, HalfT) ||
      !isOperationLegalOrCustom(ISD::UADDO, HalfT))
    return ;

  unsigned OpTypeBits = HalfT.getScalarSizeInBits();
  SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);

  // Split the 64-bit operand into its low (element 0) and high (element 1)
  // 32-bit halves.
  Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                   DAG.getConstant(0, dl, HalfT));
  Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                   DAG.getConstant(1, dl, HalfT));

  // Tmp = sign mask: arithmetic shift of Hi by 31 yields 0 or 0xffffffff.
  Tmp = DAG.getNode(ISD::SRA, dl, HalfT, Hi,
                    DAG.getConstant(OpTypeBits - 1, dl,
                    getShiftAmountTy(HalfT, DAG.getDataLayout())));
  // {Hi,Lo} += sign mask, propagating the carry out of the low word.
  Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
  Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
                   SDValue(Lo.getNode(), 1));
  // {Hi,Lo} ^= sign mask, completing the conditional negation.
  Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
  Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);

  Results.push_back(Lo);
  Results.push_back(Hi);
}
14916 
/// Return whether a constant offset can be folded into a global-address
/// node. Always false for ARM: the backend does not fold offsets into
/// global addresses.
bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}
14922 
14923 bool ARM::isBitFieldInvertedMask(unsigned v) {
14924   if (v == 0xffffffff)
14925     return false;
14926 
14927   // there can be 1's on either or both "outsides", all the "inside"
14928   // bits must be 0's
14929   return isShiftedMask_32(~v);
14930 }
14931 
14932 /// isFPImmLegal - Returns true if the target can instruction select the
14933 /// specified FP immediate natively. If false, the legalizer will
14934 /// materialize the FP immediate as a load from a constant pool.
14935 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
14936                                      bool ForCodeSize) const {
14937   if (!Subtarget->hasVFP3Base())
14938     return false;
14939   if (VT == MVT::f16 && Subtarget->hasFullFP16())
14940     return ARM_AM::getFP16Imm(Imm) != -1;
14941   if (VT == MVT::f32)
14942     return ARM_AM::getFP32Imm(Imm) != -1;
14943   if (VT == MVT::f64 && Subtarget->hasFP64())
14944     return ARM_AM::getFP64Imm(Imm) != -1;
14945   return false;
14946 }
14947 
/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vld2dup:
  case Intrinsic::arm_neon_vld3dup:
  case Intrinsic::arm_neon_vld4dup: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    // Total result size in 64-bit (D-register) units.
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    // The alignment is supplied as the intrinsic's last argument.
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    // volatile loads with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_neon_vld1x2:
  case Intrinsic::arm_neon_vld1x3:
  case Intrinsic::arm_neon_vld1x4: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    // Unlike the plain vld intrinsics above, vld1x* take the pointer as the
    // last argument and carry no alignment argument (align left as 0).
    Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.offset = 0;
    Info.align = 0;
    // volatile loads with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    unsigned NumElts = 0;
    // Sum the sizes of the vector arguments (they follow the pointer and
    // precede any trailing non-vector arguments), in 64-bit units.
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    // The alignment is supplied as the intrinsic's last argument.
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    // volatile stores with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_neon_vst1x2:
  case Intrinsic::arm_neon_vst1x3:
  case Intrinsic::arm_neon_vst1x4: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    // vst1x* carry no alignment argument (align left as 0).
    Info.align = 0;
    // volatile stores with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_ldaex:
  case Intrinsic::arm_ldrex: {
    // Exclusive loads: memory type/alignment come from the pointee type.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::arm_stlex:
  case Intrinsic::arm_strex: {
    // Exclusive stores: operand 0 is the value, operand 1 the pointer.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::arm_stlexd:
  case Intrinsic::arm_strexd:
    // Doubleword exclusive store: two value operands, then the pointer.
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(2);
    Info.offset = 0;
    Info.align = 8;
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;

  case Intrinsic::arm_ldaexd:
  case Intrinsic::arm_ldrexd:
    // Doubleword exclusive load from the pointer operand.
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 8;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;

  default:
    break;
  }

  return false;
}
15091 
15092 /// Returns true if it is beneficial to convert a load of a constant
15093 /// to just the constant itself.
15094 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
15095                                                           Type *Ty) const {
15096   assert(Ty->isIntegerTy());
15097 
15098   unsigned Bits = Ty->getPrimitiveSizeInBits();
15099   if (Bits == 0 || Bits > 32)
15100     return false;
15101   return true;
15102 }
15103 
15104 bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
15105                                                 unsigned Index) const {
15106   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
15107     return false;
15108 
15109   return (Index == 0 || Index == ResVT.getVectorNumElements());
15110 }
15111 
15112 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
15113                                         ARM_MB::MemBOpt Domain) const {
15114   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
15115 
15116   // First, if the target has no DMB, see what fallback we can use.
15117   if (!Subtarget->hasDataBarrier()) {
15118     // Some ARMv6 cpus can support data barriers with an mcr instruction.
15119     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
15120     // here.
15121     if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
15122       Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
15123       Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
15124                         Builder.getInt32(0), Builder.getInt32(7),
15125                         Builder.getInt32(10), Builder.getInt32(5)};
15126       return Builder.CreateCall(MCR, args);
15127     } else {
15128       // Instead of using barriers, atomic accesses on these subtargets use
15129       // libcalls.
15130       llvm_unreachable("makeDMB on a target so old that it has no barriers");
15131     }
15132   } else {
15133     Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
15134     // Only a full system barrier exists in the M-class architectures.
15135     Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
15136     Constant *CDomain = Builder.getInt32(Domain);
15137     return Builder.CreateCall(DMB, CDomain);
15138   }
15139 }
15140 
15141 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
15142 Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
15143                                                  Instruction *Inst,
15144                                                  AtomicOrdering Ord) const {
15145   switch (Ord) {
15146   case AtomicOrdering::NotAtomic:
15147   case AtomicOrdering::Unordered:
15148     llvm_unreachable("Invalid fence: unordered/non-atomic");
15149   case AtomicOrdering::Monotonic:
15150   case AtomicOrdering::Acquire:
15151     return nullptr; // Nothing to do
15152   case AtomicOrdering::SequentiallyConsistent:
15153     if (!Inst->hasAtomicStore())
15154       return nullptr; // Nothing to do
15155     LLVM_FALLTHROUGH;
15156   case AtomicOrdering::Release:
15157   case AtomicOrdering::AcquireRelease:
15158     if (Subtarget->preferISHSTBarriers())
15159       return makeDMB(Builder, ARM_MB::ISHST);
15160     // FIXME: add a comment with a link to documentation justifying this.
15161     else
15162       return makeDMB(Builder, ARM_MB::ISH);
15163   }
15164   llvm_unreachable("Unknown fence ordering in emitLeadingFence");
15165 }
15166 
15167 Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
15168                                                   Instruction *Inst,
15169                                                   AtomicOrdering Ord) const {
15170   switch (Ord) {
15171   case AtomicOrdering::NotAtomic:
15172   case AtomicOrdering::Unordered:
15173     llvm_unreachable("Invalid fence: unordered/not-atomic");
15174   case AtomicOrdering::Monotonic:
15175   case AtomicOrdering::Release:
15176     return nullptr; // Nothing to do
15177   case AtomicOrdering::Acquire:
15178   case AtomicOrdering::AcquireRelease:
15179   case AtomicOrdering::SequentiallyConsistent:
15180     return makeDMB(Builder, ARM_MB::ISH);
15181   }
15182   llvm_unreachable("Unknown fence ordering in emitTrailingFence");
15183 }
15184 
15185 // Loads and stores less than 64-bits are already atomic; ones above that
15186 // are doomed anyway, so defer to the default libcall and blame the OS when
15187 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
15188 // anything for those.
15189 bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
15190   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
15191   return (Size == 64) && !Subtarget->isMClass();
15192 }
15193 
15194 // Loads and stores less than 64-bits are already atomic; ones above that
15195 // are doomed anyway, so defer to the default libcall and blame the OS when
15196 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
15197 // anything for those.
15198 // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
15199 // guarantee, see DDI0406C ARM architecture reference manual,
15200 // sections A8.8.72-74 LDRD)
15201 TargetLowering::AtomicExpansionKind
15202 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
15203   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
15204   return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
15205                                                   : AtomicExpansionKind::None;
15206 }
15207 
15208 // For the real atomic operations, we have ldrex/strex up to 32 bits,
15209 // and up to 64 bits on the non-M profiles
15210 TargetLowering::AtomicExpansionKind
15211 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
15212   if (AI->isFloatingPointOperation())
15213     return AtomicExpansionKind::CmpXChg;
15214 
15215   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
15216   bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
15217   return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
15218              ? AtomicExpansionKind::LLSC
15219              : AtomicExpansionKind::None;
15220 }
15221 
15222 TargetLowering::AtomicExpansionKind
15223 ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
15224   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
15225   // implement cmpxchg without spilling. If the address being exchanged is also
15226   // on the stack and close enough to the spill slot, this can lead to a
15227   // situation where the monitor always gets cleared and the atomic operation
15228   // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
15229   bool HasAtomicCmpXchg =
15230       !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
15231   if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg)
15232     return AtomicExpansionKind::LLSC;
15233   return AtomicExpansionKind::None;
15234 }
15235 
bool ARMTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {
  // Uniform per-subtarget decision: governed entirely by the precomputed
  // InsertFencesForAtomic member flag, independent of the instruction itself.
  return InsertFencesForAtomic;
}
15240 
// Use the LOAD_STACK_GUARD pseudo-node for stack protectors.
// This has so far only been implemented for MachO.
bool ARMTargetLowering::useLoadStackGuardNode() const {
  return Subtarget->isTargetMachO();
}
15245 
15246 void ARMTargetLowering::insertSSPDeclarations(Module &M) const {
15247   if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
15248     return TargetLowering::insertSSPDeclarations(M);
15249 
15250   // MSVC CRT has a global variable holding security cookie.
15251   M.getOrInsertGlobal("__security_cookie",
15252                       Type::getInt8PtrTy(M.getContext()));
15253 
15254   // MSVC CRT has a function to validate security cookie.
15255   FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
15256       "__security_check_cookie", Type::getVoidTy(M.getContext()),
15257       Type::getInt8PtrTy(M.getContext()));
15258   if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee()))
15259     F->addAttribute(1, Attribute::AttrKind::InReg);
15260 }
15261 
15262 Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const {
15263   // MSVC CRT has a global variable holding security cookie.
15264   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
15265     return M.getGlobalVariable("__security_cookie");
15266   return TargetLowering::getSDagStackGuard(M);
15267 }
15268 
15269 Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const {
15270   // MSVC CRT has a function to validate security cookie.
15271   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
15272     return M.getFunction("__security_check_cookie");
15273   return TargetLowering::getSSPStackGuardCheck(M);
15274 }
15275 
15276 bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
15277                                                   unsigned &Cost) const {
15278   // If we do not have NEON, vector types are not natively supported.
15279   if (!Subtarget->hasNEON())
15280     return false;
15281 
15282   // Floating point values and vector values map to the same register file.
15283   // Therefore, although we could do a store extract of a vector type, this is
15284   // better to leave at float as we have more freedom in the addressing mode for
15285   // those.
15286   if (VectorTy->isFPOrFPVectorTy())
15287     return false;
15288 
15289   // If the index is unknown at compile time, this is very expensive to lower
15290   // and it is not possible to combine the store with the extract.
15291   if (!isa<ConstantInt>(Idx))
15292     return false;
15293 
15294   assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
15295   unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth();
15296   // We can do a store + vector extract on any vector that fits perfectly in a D
15297   // or Q register.
15298   if (BitWidth == 64 || BitWidth == 128) {
15299     Cost = 0;
15300     return true;
15301   }
15302   return false;
15303 }
15304 
bool ARMTargetLowering::isCheapToSpeculateCttz() const {
  // Gated on v6T2 — presumably because RBIT (introduced with v6T2) makes
  // cttz a short branch-free RBIT+CLZ sequence; confirm against ISel patterns.
  return Subtarget->hasV6T2Ops();
}
15308 
bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
  // Gated on v6T2 — presumably because CLZ is then available in both ARM and
  // Thumb-2 encodings, making ctlz cheap to speculate; confirm against ISel.
  return Subtarget->hasV6T2Ops();
}
15312 
bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
  // Inline expansion of the shift costs extra code size, so suppress it when
  // the function is marked minsize.
  return !Subtarget->hasMinSize();
}
15316 
15317 Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
15318                                          AtomicOrdering Ord) const {
15319   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
15320   Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
15321   bool IsAcquire = isAcquireOrStronger(Ord);
15322 
15323   // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
15324   // intrinsic must return {i32, i32} and we have to recombine them into a
15325   // single i64 here.
15326   if (ValTy->getPrimitiveSizeInBits() == 64) {
15327     Intrinsic::ID Int =
15328         IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
15329     Function *Ldrex = Intrinsic::getDeclaration(M, Int);
15330 
15331     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
15332     Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
15333 
15334     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
15335     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
15336     if (!Subtarget->isLittle())
15337       std::swap (Lo, Hi);
15338     Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
15339     Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
15340     return Builder.CreateOr(
15341         Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
15342   }
15343 
15344   Type *Tys[] = { Addr->getType() };
15345   Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
15346   Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);
15347 
15348   return Builder.CreateTruncOrBitCast(
15349       Builder.CreateCall(Ldrex, Addr),
15350       cast<PointerType>(Addr->getType())->getElementType());
15351 }
15352 
15353 void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
15354     IRBuilder<> &Builder) const {
15355   if (!Subtarget->hasV7Ops())
15356     return;
15357   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
15358   Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
15359 }
15360 
15361 Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
15362                                                Value *Addr,
15363                                                AtomicOrdering Ord) const {
15364   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
15365   bool IsRelease = isReleaseOrStronger(Ord);
15366 
15367   // Since the intrinsics must have legal type, the i64 intrinsics take two
15368   // parameters: "i32, i32". We must marshal Val into the appropriate form
15369   // before the call.
15370   if (Val->getType()->getPrimitiveSizeInBits() == 64) {
15371     Intrinsic::ID Int =
15372         IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
15373     Function *Strex = Intrinsic::getDeclaration(M, Int);
15374     Type *Int32Ty = Type::getInt32Ty(M->getContext());
15375 
15376     Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
15377     Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
15378     if (!Subtarget->isLittle())
15379       std::swap(Lo, Hi);
15380     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
15381     return Builder.CreateCall(Strex, {Lo, Hi, Addr});
15382   }
15383 
15384   Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
15385   Type *Tys[] = { Addr->getType() };
15386   Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
15387 
15388   return Builder.CreateCall(
15389       Strex, {Builder.CreateZExtOrBitCast(
15390                   Val, Strex->getFunctionType()->getParamType(0)),
15391               Addr});
15392 }
15393 
15394 
bool ARMTargetLowering::alignLoopsWithOptSize() const {
  // Keep loop alignment on M-class subtargets even when optimising for size
  // (presumably a fetch-related win on these cores — confirm).
  return Subtarget->isMClass();
}
15398 
15399 /// A helper function for determining the number of interleaved accesses we
15400 /// will generate when lowering accesses of the given type.
15401 unsigned
15402 ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
15403                                              const DataLayout &DL) const {
15404   return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
15405 }
15406 
15407 bool ARMTargetLowering::isLegalInterleavedAccessType(
15408     VectorType *VecTy, const DataLayout &DL) const {
15409 
15410   unsigned VecSize = DL.getTypeSizeInBits(VecTy);
15411   unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
15412 
15413   // Ensure the vector doesn't have f16 elements. Even though we could do an
15414   // i16 vldN, we can't hold the f16 vectors and will end up converting via
15415   // f32.
15416   if (VecTy->getElementType()->isHalfTy())
15417     return false;
15418 
15419   // Ensure the number of vector elements is greater than 1.
15420   if (VecTy->getNumElements() < 2)
15421     return false;
15422 
15423   // Ensure the element type is legal.
15424   if (ElSize != 8 && ElSize != 16 && ElSize != 32)
15425     return false;
15426 
15427   // Ensure the total vector size is 64 or a multiple of 128. Types larger than
15428   // 128 will be split into multiple interleaved accesses.
15429   return VecSize == 64 || VecSize % 128 == 0;
15430 }
15431 
15432 /// Lower an interleaved load into a vldN intrinsic.
15433 ///
15434 /// E.g. Lower an interleaved load (Factor = 2):
15435 ///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
15436 ///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
15437 ///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
15438 ///
15439 ///      Into:
15440 ///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
15441 ///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
15442 ///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
bool ARMTargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");

  // All shuffles share the same (de-interleaved) result type.
  VectorType *VecTy = Shuffles[0]->getType();
  Type *EltTy = VecTy->getVectorElementType();

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Skip if we do not have NEON and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long as
  // the vector types are divisible by 128.
  if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(VecTy, DL))
    return false;

  unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);

  // A pointer vector can not be the return type of the ldN intrinsics. Need to
  // load integer vectors first and then convert to pointer vectors.
  if (EltTy->isPointerTy())
    VecTy =
        VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());

  IRBuilder<> Builder(LI);

  // The base address of the load.
  Value *BaseAddr = LI->getPointerOperand();

  if (NumLoads > 1) {
    // If we're going to generate more than one load, reset the sub-vector type
    // to something legal.
    VecTy = VectorType::get(VecTy->getVectorElementType(),
                            VecTy->getVectorNumElements() / NumLoads);

    // We will compute the pointer operand of each load from the original base
    // address using GEPs. Cast the base address to a pointer to the scalar
    // element type.
    BaseAddr = Builder.CreateBitCast(
        BaseAddr, VecTy->getVectorElementType()->getPointerTo(
                      LI->getPointerAddressSpace()));
  }

  assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");

  // Select the vld2/vld3/vld4 intrinsic by interleave factor (Factor - 2).
  Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
  Type *Tys[] = {VecTy, Int8Ptr};
  static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                            Intrinsic::arm_neon_vld3,
                                            Intrinsic::arm_neon_vld4};
  Function *VldnFunc =
      Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);

  // Holds sub-vectors extracted from the load intrinsic return values. The
  // sub-vectors are associated with the shufflevector instructions they will
  // replace.
  DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;

  for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
    // If we're generating more than one load, compute the base address of
    // subsequent loads as an offset from the previous.
    if (LoadCount > 0)
      BaseAddr =
          Builder.CreateConstGEP1_32(VecTy->getVectorElementType(), BaseAddr,
                                     VecTy->getVectorNumElements() * Factor);

    // vldN takes the (i8*) address and the alignment as operands.
    SmallVector<Value *, 2> Ops;
    Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
    Ops.push_back(Builder.getInt32(LI->getAlignment()));

    CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN");

    // Replace uses of each shufflevector with the corresponding vector loaded
    // by ldN.
    for (unsigned i = 0; i < Shuffles.size(); i++) {
      ShuffleVectorInst *SV = Shuffles[i];
      unsigned Index = Indices[i];

      // The Index-th member of the vldN aggregate is the de-interleaved lane.
      Value *SubVec = Builder.CreateExtractValue(VldN, Index);

      // Convert the integer vector to pointer vector if the element is pointer.
      if (EltTy->isPointerTy())
        SubVec = Builder.CreateIntToPtr(
            SubVec, VectorType::get(SV->getType()->getVectorElementType(),
                                    VecTy->getVectorNumElements()));

      SubVecs[SV].push_back(SubVec);
    }
  }

  // Replace uses of the shufflevector instructions with the sub-vectors
  // returned by the load intrinsic. If a shufflevector instruction is
  // associated with more than one sub-vector, those sub-vectors will be
  // concatenated into a single wide vector.
  for (ShuffleVectorInst *SVI : Shuffles) {
    auto &SubVec = SubVecs[SVI];
    auto *WideVec =
        SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
    SVI->replaceAllUsesWith(WideVec);
  }

  return true;
}
15550 
15551 /// Lower an interleaved store into a vstN intrinsic.
15552 ///
15553 /// E.g. Lower an interleaved store (Factor = 3):
15554 ///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
15555 ///                                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
15556 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
15557 ///
15558 ///      Into:
15559 ///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
15560 ///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
15561 ///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
15562 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
15563 ///
15564 /// Note that the new shufflevectors will be removed and we'll only generate one
15565 /// vst3 instruction in CodeGen.
15566 ///
15567 /// Example for a more general valid mask (Factor 3). Lower:
15568 ///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
15569 ///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
15570 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
15571 ///
15572 ///      Into:
15573 ///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
15574 ///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
15575 ///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
15576 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");

  VectorType *VecTy = SVI->getType();
  assert(VecTy->getVectorNumElements() % Factor == 0 &&
         "Invalid interleaved store");

  // LaneLen is the number of elements each of the Factor sub-vectors holds.
  unsigned LaneLen = VecTy->getVectorNumElements() / Factor;
  Type *EltTy = VecTy->getVectorElementType();
  VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Skip if we do not have NEON and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long as
  // the vector types are divisible by 128.
  if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(SubVecTy, DL))
    return false;

  unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);

  Value *Op0 = SVI->getOperand(0);
  Value *Op1 = SVI->getOperand(1);
  IRBuilder<> Builder(SI);

  // StN intrinsics don't support pointer vectors as arguments. Convert pointer
  // vectors to integer vectors.
  if (EltTy->isPointerTy()) {
    Type *IntTy = DL.getIntPtrType(EltTy);

    // Convert to the corresponding integer vector.
    Type *IntVecTy =
        VectorType::get(IntTy, Op0->getType()->getVectorNumElements());
    Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
    Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);

    SubVecTy = VectorType::get(IntTy, LaneLen);
  }

  // The base address of the store.
  Value *BaseAddr = SI->getPointerOperand();

  if (NumStores > 1) {
    // If we're going to generate more than one store, reset the lane length
    // and sub-vector type to something legal.
    LaneLen /= NumStores;
    SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen);

    // We will compute the pointer operand of each store from the original base
    // address using GEPs. Cast the base address to a pointer to the scalar
    // element type.
    BaseAddr = Builder.CreateBitCast(
        BaseAddr, SubVecTy->getVectorElementType()->getPointerTo(
                      SI->getPointerAddressSpace()));
  }

  assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");

  auto Mask = SVI->getShuffleMask();

  // Select the vst2/vst3/vst4 intrinsic by interleave factor (Factor - 2).
  Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
  Type *Tys[] = {Int8Ptr, SubVecTy};
  static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
                                             Intrinsic::arm_neon_vst3,
                                             Intrinsic::arm_neon_vst4};

  for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
    // If we generating more than one store, we compute the base address of
    // subsequent stores as an offset from the previous.
    if (StoreCount > 0)
      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getVectorElementType(),
                                            BaseAddr, LaneLen * Factor);

    SmallVector<Value *, 6> Ops;
    Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));

    Function *VstNFunc =
        Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys);

    // Split the shufflevector operands into sub vectors for the new vstN call.
    for (unsigned i = 0; i < Factor; i++) {
      // IdxI is the mask position of lane-group i's first element within this
      // sub-store.
      unsigned IdxI = StoreCount * LaneLen * Factor + i;
      if (Mask[IdxI] >= 0) {
        Ops.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Builder, Mask[IdxI], LaneLen, 0)));
      } else {
        // First element is undef: scan later lanes for a defined element from
        // which the sequential start index can be derived.
        unsigned StartMask = 0;
        for (unsigned j = 1; j < LaneLen; j++) {
          unsigned IdxJ = StoreCount * LaneLen * Factor + j;
          // NOTE(review): for NumStores > 1 this index combines IdxJ * Factor
          // with IdxI, each of which already contains the absolute
          // StoreCount * LaneLen * Factor offset — the resulting index looks
          // suspicious for the later sub-stores; confirm against the intended
          // mask layout / upstream history.
          if (Mask[IdxJ * Factor + IdxI] >= 0) {
            StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
            break;
          }
        }
        // Note: If all elements in a chunk are undefs, StartMask=0!
        // Note: Filling undef gaps with random elements is ok, since
        // those elements were being written anyway (with undefs).
        // In the case of all undefs we're defaulting to using elems from 0
        // Note: StartMask cannot be negative, it's checked in
        // isReInterleaveMask
        Ops.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Builder, StartMask, LaneLen, 0)));
      }
    }

    // The final vstN operand is the alignment of the original store.
    Ops.push_back(Builder.getInt32(SI->getAlignment()));
    Builder.CreateCall(VstNFunc, Ops);
  }
  return true;
}
15690 
// Classification of the base element type of a homogeneous aggregate (HA)
// for AAPCS-VFP argument passing (see isHomogeneousAggregate below).
enum HABaseType {
  HA_UNKNOWN = 0, // No base type established yet.
  HA_FLOAT,       // 32-bit float elements.
  HA_DOUBLE,      // 64-bit double elements.
  HA_VECT64,      // 64-bit vector elements.
  HA_VECT128      // 128-bit vector elements.
};
15698 
/// Determine whether \p Ty is a homogeneous aggregate: a (possibly nested)
/// struct/array whose leaf elements all share one base kind (float, double,
/// or a 64/128-bit vector) with at most 4 members in total. \p Base carries
/// the kind established so far and \p Members the accumulated element count.
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) {
  if (auto *ST = dyn_cast<StructType>(Ty)) {
    // A struct qualifies if every field is itself an HA of the same base;
    // each field's member count is accumulated into the caller's total.
    for (unsigned i = 0; i < ST->getNumElements(); ++i) {
      uint64_t SubMembers = 0;
      if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
        return false;
      Members += SubMembers;
    }
  } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
    // An array contributes NumElements copies of its element's members.
    uint64_t SubMembers = 0;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
      return false;
    Members += SubMembers * AT->getNumElements();
  } else if (Ty->isFloatTy()) {
    // A scalar float fixes (or must match) the float base kind.
    if (Base != HA_UNKNOWN && Base != HA_FLOAT)
      return false;
    Members = 1;
    Base = HA_FLOAT;
  } else if (Ty->isDoubleTy()) {
    // A scalar double fixes (or must match) the double base kind.
    if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
      return false;
    Members = 1;
    Base = HA_DOUBLE;
  } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
    Members = 1;
    switch (Base) {
    case HA_FLOAT:
    case HA_DOUBLE:
      // Vectors cannot mix with scalar float/double members.
      return false;
    case HA_VECT64:
      return VT->getBitWidth() == 64;
    case HA_VECT128:
      return VT->getBitWidth() == 128;
    case HA_UNKNOWN:
      // The first vector seen fixes the base kind from its bit width.
      switch (VT->getBitWidth()) {
      case 64:
        Base = HA_VECT64;
        return true;
      case 128:
        Base = HA_VECT128;
        return true;
      default:
        return false;
      }
    }
  }

  // Any type not handled above (e.g. integers) leaves Members untouched, so
  // it is rejected here unless nested recursion already counted members.
  // An HA holds between 1 and 4 members.
  return (Members > 0 && Members <= 4);
}
15749 
15750 /// Return the correct alignment for the current calling convention.
15751 unsigned
15752 ARMTargetLowering::getABIAlignmentForCallingConv(Type *ArgTy,
15753                                                  DataLayout DL) const {
15754   if (!ArgTy->isVectorTy())
15755     return DL.getABITypeAlignment(ArgTy);
15756 
15757   // Avoid over-aligning vector parameters. It would require realigning the
15758   // stack and waste space for no real benefit.
15759   return std::min(DL.getABITypeAlignment(ArgTy), DL.getStackAlignment());
15760 }
15761 
15762 /// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
15763 /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
15764 /// passing according to AAPCS rules.
15765 bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
15766     Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
15767   if (getEffectiveCallingConv(CallConv, isVarArg) !=
15768       CallingConv::ARM_AAPCS_VFP)
15769     return false;
15770 
15771   HABaseType Base = HA_UNKNOWN;
15772   uint64_t Members = 0;
15773   bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
15774   LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
15775 
15776   bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
15777   return IsHA || IsIntArray;
15778 }
15779 
15780 unsigned ARMTargetLowering::getExceptionPointerRegister(
15781     const Constant *PersonalityFn) const {
15782   // Platforms which do not use SjLj EH may return values in these registers
15783   // via the personality function.
15784   return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0;
15785 }
15786 
15787 unsigned ARMTargetLowering::getExceptionSelectorRegister(
15788     const Constant *PersonalityFn) const {
15789   // Platforms which do not use SjLj EH may return values in these registers
15790   // via the personality function.
15791   return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1;
15792 }
15793 
15794 void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
15795   // Update IsSplitCSR in ARMFunctionInfo.
15796   ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
15797   AFI->setIsSplitCSR(true);
15798 }
15799 
void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  // A null list means this function has no CSRs saved via copy; nothing to do.
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  // For each CSR in the (null-terminated) list, copy it into a fresh vreg in
  // the entry block and copy it back in every exit block.
  for (const MCPhysReg *I = IStart; *I; ++I) {
    // Pick the register class matching the physical register.
    const TargetRegisterClass *RC = nullptr;
    if (ARM::GPRRegClass.contains(*I))
      RC = &ARM::GPRRegClass;
    else if (ARM::DPRRegClass.contains(*I))
      RC = &ARM::DPRRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}
15840 
void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
  // Pre-compute the maximum call frame size before running the base-class
  // finalization so later passes can rely on it.
  MF.getFrameInfo().computeMaxCallFrameSize(MF);
  TargetLoweringBase::finalizeLowering(MF);
}
15845