1 //===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the interfaces that ARM uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "ARMISelLowering.h"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMBaseRegisterInfo.h"
18 #include "ARMCallingConv.h"
19 #include "ARMConstantPoolValue.h"
20 #include "ARMMachineFunctionInfo.h"
21 #include "ARMPerfectShuffle.h"
22 #include "ARMRegisterInfo.h"
23 #include "ARMSelectionDAGInfo.h"
24 #include "ARMSubtarget.h"
25 #include "MCTargetDesc/ARMAddressingModes.h"
26 #include "MCTargetDesc/ARMBaseInfo.h"
27 #include "llvm/ADT/APFloat.h"
28 #include "llvm/ADT/APInt.h"
29 #include "llvm/ADT/ArrayRef.h"
30 #include "llvm/ADT/BitVector.h"
31 #include "llvm/ADT/DenseMap.h"
32 #include "llvm/ADT/STLExtras.h"
33 #include "llvm/ADT/SmallPtrSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringExtras.h"
37 #include "llvm/ADT/StringRef.h"
38 #include "llvm/ADT/StringSwitch.h"
39 #include "llvm/ADT/Triple.h"
40 #include "llvm/ADT/Twine.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/CodeGen/CallingConvLower.h"
43 #include "llvm/CodeGen/ISDOpcodes.h"
44 #include "llvm/CodeGen/IntrinsicLowering.h"
45 #include "llvm/CodeGen/MachineBasicBlock.h"
46 #include "llvm/CodeGen/MachineConstantPool.h"
47 #include "llvm/CodeGen/MachineFrameInfo.h"
48 #include "llvm/CodeGen/MachineFunction.h"
49 #include "llvm/CodeGen/MachineInstr.h"
50 #include "llvm/CodeGen/MachineInstrBuilder.h"
51 #include "llvm/CodeGen/MachineJumpTableInfo.h"
52 #include "llvm/CodeGen/MachineMemOperand.h"
53 #include "llvm/CodeGen/MachineOperand.h"
54 #include "llvm/CodeGen/MachineRegisterInfo.h"
55 #include "llvm/CodeGen/MachineValueType.h"
56 #include "llvm/CodeGen/RuntimeLibcalls.h"
57 #include "llvm/CodeGen/SelectionDAG.h"
58 #include "llvm/CodeGen/SelectionDAGNodes.h"
59 #include "llvm/CodeGen/ValueTypes.h"
60 #include "llvm/IR/Attributes.h"
61 #include "llvm/IR/CallingConv.h"
62 #include "llvm/IR/Constant.h"
63 #include "llvm/IR/Constants.h"
64 #include "llvm/IR/DataLayout.h"
65 #include "llvm/IR/DebugLoc.h"
66 #include "llvm/IR/DerivedTypes.h"
67 #include "llvm/IR/Function.h"
68 #include "llvm/IR/GlobalAlias.h"
69 #include "llvm/IR/GlobalValue.h"
70 #include "llvm/IR/GlobalVariable.h"
71 #include "llvm/IR/IRBuilder.h"
72 #include "llvm/IR/InlineAsm.h"
73 #include "llvm/IR/Instruction.h"
74 #include "llvm/IR/Instructions.h"
75 #include "llvm/IR/IntrinsicInst.h"
76 #include "llvm/IR/Intrinsics.h"
77 #include "llvm/IR/Module.h"
78 #include "llvm/IR/Type.h"
79 #include "llvm/IR/User.h"
80 #include "llvm/IR/Value.h"
81 #include "llvm/MC/MCInstrDesc.h"
82 #include "llvm/MC/MCInstrItineraries.h"
83 #include "llvm/MC/MCRegisterInfo.h"
84 #include "llvm/MC/MCSchedule.h"
85 #include "llvm/Support/AtomicOrdering.h"
86 #include "llvm/Support/BranchProbability.h"
87 #include "llvm/Support/Casting.h"
88 #include "llvm/Support/CodeGen.h"
89 #include "llvm/Support/CommandLine.h"
90 #include "llvm/Support/Compiler.h"
91 #include "llvm/Support/Debug.h"
92 #include "llvm/Support/ErrorHandling.h"
93 #include "llvm/Support/KnownBits.h"
94 #include "llvm/Support/MathExtras.h"
95 #include "llvm/Support/raw_ostream.h"
96 #include "llvm/Target/TargetInstrInfo.h"
97 #include "llvm/Target/TargetMachine.h"
98 #include "llvm/Target/TargetOptions.h"
99 #include <algorithm>
100 #include <cassert>
101 #include <cstdint>
102 #include <cstdlib>
103 #include <iterator>
104 #include <limits>
105 #include <string>
106 #include <tuple>
107 #include <utility>
108 #include <vector>
109 
110 using namespace llvm;
111 
112 #define DEBUG_TYPE "arm-isel"
113 
114 STATISTIC(NumTailCalls, "Number of tail calls");
115 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
116 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
117 STATISTIC(NumConstpoolPromoted,
118   "Number of constants with their storage promoted into constant pools");
119 
120 static cl::opt<bool>
121 ARMInterworking("arm-interworking", cl::Hidden,
122   cl::desc("Enable / disable ARM interworking (for debugging only)"),
123   cl::init(true));
124 
125 static cl::opt<bool> EnableConstpoolPromotion(
126     "arm-promote-constant", cl::Hidden,
127     cl::desc("Enable / disable promotion of unnamed_addr constants into "
128              "constant pools"),
129     cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
130 static cl::opt<unsigned> ConstpoolPromotionMaxSize(
131     "arm-promote-constant-max-size", cl::Hidden,
132     cl::desc("Maximum size of constant to promote into a constant pool"),
133     cl::init(64));
134 static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
135     "arm-promote-constant-max-total", cl::Hidden,
136     cl::desc("Maximum size of ALL constants to promote into a constant pool"),
137     cl::init(128));
138 
139 // The APCS parameter registers.
140 static const MCPhysReg GPRArgRegs[] = {
141   ARM::R0, ARM::R1, ARM::R2, ARM::R3
142 };
143 
144 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
145                                        MVT PromotedBitwiseVT) {
146   if (VT != PromotedLdStVT) {
147     setOperationAction(ISD::LOAD, VT, Promote);
148     AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
149 
150     setOperationAction(ISD::STORE, VT, Promote);
151     AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
152   }
153 
154   MVT ElemTy = VT.getVectorElementType();
155   if (ElemTy != MVT::f64)
156     setOperationAction(ISD::SETCC, VT, Custom);
157   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
158   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
159   if (ElemTy == MVT::i32) {
160     setOperationAction(ISD::SINT_TO_FP, VT, Custom);
161     setOperationAction(ISD::UINT_TO_FP, VT, Custom);
162     setOperationAction(ISD::FP_TO_SINT, VT, Custom);
163     setOperationAction(ISD::FP_TO_UINT, VT, Custom);
164   } else {
165     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
166     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
167     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
168     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
169   }
170   setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
171   setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
172   setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
173   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
174   setOperationAction(ISD::SELECT,            VT, Expand);
175   setOperationAction(ISD::SELECT_CC,         VT, Expand);
176   setOperationAction(ISD::VSELECT,           VT, Expand);
177   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
178   if (VT.isInteger()) {
179     setOperationAction(ISD::SHL, VT, Custom);
180     setOperationAction(ISD::SRA, VT, Custom);
181     setOperationAction(ISD::SRL, VT, Custom);
182   }
183 
184   // Promote all bit-wise operations.
185   if (VT.isInteger() && VT != PromotedBitwiseVT) {
186     setOperationAction(ISD::AND, VT, Promote);
187     AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
188     setOperationAction(ISD::OR,  VT, Promote);
189     AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
190     setOperationAction(ISD::XOR, VT, Promote);
191     AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
192   }
193 
194   // Neon does not support vector divide/remainder operations.
195   setOperationAction(ISD::SDIV, VT, Expand);
196   setOperationAction(ISD::UDIV, VT, Expand);
197   setOperationAction(ISD::FDIV, VT, Expand);
198   setOperationAction(ISD::SREM, VT, Expand);
199   setOperationAction(ISD::UREM, VT, Expand);
200   setOperationAction(ISD::FREM, VT, Expand);
201 
202   if (!VT.isFloatingPoint() &&
203       VT != MVT::v2i64 && VT != MVT::v1i64)
204     for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
205       setOperationAction(Opcode, VT, Legal);
206 }
207 
208 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
209   addRegisterClass(VT, &ARM::DPRRegClass);
210   addTypeForNEON(VT, MVT::f64, MVT::v2i32);
211 }
212 
213 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
214   addRegisterClass(VT, &ARM::DPairRegClass);
215   addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
216 }
217 
218 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
219                                      const ARMSubtarget &STI)
220     : TargetLowering(TM), Subtarget(&STI) {
221   RegInfo = Subtarget->getRegisterInfo();
222   Itins = Subtarget->getInstrItineraryData();
223 
224   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
225 
226   if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
227       !Subtarget->isTargetWatchOS()) {
228     const auto &E = Subtarget->getTargetTriple().getEnvironment();
229 
230     bool IsHFTarget = E == Triple::EABIHF || E == Triple::GNUEABIHF ||
231                       E == Triple::MuslEABIHF;
232     // Windows is a special case.  Technically, we will replace all of the "GNU"
233     // calls with calls to MSVCRT if appropriate and adjust the calling
234     // convention then.
235     IsHFTarget = IsHFTarget || Subtarget->isTargetWindows();
236 
237     for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
238       setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
239                             IsHFTarget ? CallingConv::ARM_AAPCS_VFP
240                                        : CallingConv::ARM_AAPCS);
241   }
242 
243   if (Subtarget->isTargetMachO()) {
244     // Uses VFP for Thumb libfuncs if available.
245     if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
246         Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
247       static const struct {
248         const RTLIB::Libcall Op;
249         const char * const Name;
250         const ISD::CondCode Cond;
251       } LibraryCalls[] = {
252         // Single-precision floating-point arithmetic.
253         { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
254         { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
255         { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
256         { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },
257 
258         // Double-precision floating-point arithmetic.
259         { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
260         { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
261         { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
262         { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },
263 
264         // Single-precision comparisons.
265         { RTLIB::OEQ_F32, "__eqsf2vfp",    ISD::SETNE },
266         { RTLIB::UNE_F32, "__nesf2vfp",    ISD::SETNE },
267         { RTLIB::OLT_F32, "__ltsf2vfp",    ISD::SETNE },
268         { RTLIB::OLE_F32, "__lesf2vfp",    ISD::SETNE },
269         { RTLIB::OGE_F32, "__gesf2vfp",    ISD::SETNE },
270         { RTLIB::OGT_F32, "__gtsf2vfp",    ISD::SETNE },
271         { RTLIB::UO_F32,  "__unordsf2vfp", ISD::SETNE },
272         { RTLIB::O_F32,   "__unordsf2vfp", ISD::SETEQ },
273 
274         // Double-precision comparisons.
275         { RTLIB::OEQ_F64, "__eqdf2vfp",    ISD::SETNE },
276         { RTLIB::UNE_F64, "__nedf2vfp",    ISD::SETNE },
277         { RTLIB::OLT_F64, "__ltdf2vfp",    ISD::SETNE },
278         { RTLIB::OLE_F64, "__ledf2vfp",    ISD::SETNE },
279         { RTLIB::OGE_F64, "__gedf2vfp",    ISD::SETNE },
280         { RTLIB::OGT_F64, "__gtdf2vfp",    ISD::SETNE },
281         { RTLIB::UO_F64,  "__unorddf2vfp", ISD::SETNE },
282         { RTLIB::O_F64,   "__unorddf2vfp", ISD::SETEQ },
283 
284         // Floating-point to integer conversions.
285         // i64 conversions are done via library routines even when generating VFP
286         // instructions, so use the same ones.
287         { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp",    ISD::SETCC_INVALID },
288         { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
289         { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp",    ISD::SETCC_INVALID },
290         { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },
291 
292         // Conversions between floating types.
293         { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp",  ISD::SETCC_INVALID },
294         { RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp", ISD::SETCC_INVALID },
295 
296         // Integer to floating-point conversions.
297         // i64 conversions are done via library routines even when generating VFP
298         // instructions, so use the same ones.
299         // FIXME: There appears to be some naming inconsistency in ARM libgcc:
300         // e.g., __floatunsidf vs. __floatunssidfvfp.
301         { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp",    ISD::SETCC_INVALID },
302         { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
303         { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp",    ISD::SETCC_INVALID },
304         { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
305       };
306 
307       for (const auto &LC : LibraryCalls) {
308         setLibcallName(LC.Op, LC.Name);
309         if (LC.Cond != ISD::SETCC_INVALID)
310           setCmpLibcallCC(LC.Op, LC.Cond);
311       }
312     }
313 
314     // Set the correct calling convention for ARMv7k WatchOS. It's just
315     // AAPCS_VFP for functions as simple as libcalls.
316     if (Subtarget->isTargetWatchABI()) {
317       for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i)
318         setLibcallCallingConv((RTLIB::Libcall)i, CallingConv::ARM_AAPCS_VFP);
319     }
320   }
321 
322   // These libcalls are not available in 32-bit.
323   setLibcallName(RTLIB::SHL_I128, nullptr);
324   setLibcallName(RTLIB::SRL_I128, nullptr);
325   setLibcallName(RTLIB::SRA_I128, nullptr);
326 
327   // RTLIB
328   if (Subtarget->isAAPCS_ABI() &&
329       (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
330        Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
331     static const struct {
332       const RTLIB::Libcall Op;
333       const char * const Name;
334       const CallingConv::ID CC;
335       const ISD::CondCode Cond;
336     } LibraryCalls[] = {
337       // Double-precision floating-point arithmetic helper functions
338       // RTABI chapter 4.1.2, Table 2
339       { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
340       { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
341       { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
342       { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
343 
344       // Double-precision floating-point comparison helper functions
345       // RTABI chapter 4.1.2, Table 3
346       { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
347       { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
348       { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
349       { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
350       { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
351       { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
352       { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
353       { RTLIB::O_F64,   "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
354 
355       // Single-precision floating-point arithmetic helper functions
356       // RTABI chapter 4.1.2, Table 4
357       { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
358       { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
359       { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
360       { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
361 
362       // Single-precision floating-point comparison helper functions
363       // RTABI chapter 4.1.2, Table 5
364       { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
365       { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
366       { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
367       { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
368       { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
369       { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
370       { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
371       { RTLIB::O_F32,   "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
372 
373       // Floating-point to integer conversions.
374       // RTABI chapter 4.1.2, Table 6
375       { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
376       { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
377       { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
378       { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
379       { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
380       { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
381       { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
382       { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
383 
384       // Conversions between floating types.
385       // RTABI chapter 4.1.2, Table 7
386       { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
387       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
388       { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
389 
390       // Integer to floating-point conversions.
391       // RTABI chapter 4.1.2, Table 8
392       { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
393       { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
394       { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
395       { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
396       { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
397       { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
398       { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
399       { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
400 
401       // Long long helper functions
402       // RTABI chapter 4.2, Table 9
403       { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
404       { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
405       { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
406       { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
407 
408       // Integer division functions
409       // RTABI chapter 4.3.1
410       { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
411       { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
412       { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
413       { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
414       { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
415       { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
416       { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
417       { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
418     };
419 
420     for (const auto &LC : LibraryCalls) {
421       setLibcallName(LC.Op, LC.Name);
422       setLibcallCallingConv(LC.Op, LC.CC);
423       if (LC.Cond != ISD::SETCC_INVALID)
424         setCmpLibcallCC(LC.Op, LC.Cond);
425     }
426 
427     // EABI dependent RTLIB
428     if (TM.Options.EABIVersion == EABI::EABI4 ||
429         TM.Options.EABIVersion == EABI::EABI5) {
430       static const struct {
431         const RTLIB::Libcall Op;
432         const char *const Name;
433         const CallingConv::ID CC;
434         const ISD::CondCode Cond;
435       } MemOpsLibraryCalls[] = {
436         // Memory operations
437         // RTABI chapter 4.3.4
438         { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
439         { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
440         { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
441       };
442 
443       for (const auto &LC : MemOpsLibraryCalls) {
444         setLibcallName(LC.Op, LC.Name);
445         setLibcallCallingConv(LC.Op, LC.CC);
446         if (LC.Cond != ISD::SETCC_INVALID)
447           setCmpLibcallCC(LC.Op, LC.Cond);
448       }
449     }
450   }
451 
452   if (Subtarget->isTargetWindows()) {
453     static const struct {
454       const RTLIB::Libcall Op;
455       const char * const Name;
456       const CallingConv::ID CC;
457     } LibraryCalls[] = {
458       { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
459       { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
460       { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
461       { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
462       { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
463       { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
464       { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
465       { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
466     };
467 
468     for (const auto &LC : LibraryCalls) {
469       setLibcallName(LC.Op, LC.Name);
470       setLibcallCallingConv(LC.Op, LC.CC);
471     }
472   }
473 
474   // Use divmod compiler-rt calls for iOS 5.0 and later.
475   if (Subtarget->isTargetMachO() &&
476       !(Subtarget->isTargetIOS() &&
477         Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
478     setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
479     setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
480   }
481 
482   // The half <-> float conversion functions are always soft-float on
483   // non-watchos platforms, but are needed for some targets which use a
484   // hard-float calling convention by default.
485   if (!Subtarget->isTargetWatchABI()) {
486     if (Subtarget->isAAPCS_ABI()) {
487       setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
488       setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
489       setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
490     } else {
491       setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
492       setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
493       setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
494     }
495   }
496 
497   // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
498   // a __gnu_ prefix (which is the default).
499   if (Subtarget->isTargetAEABI()) {
500     static const struct {
501       const RTLIB::Libcall Op;
502       const char * const Name;
503       const CallingConv::ID CC;
504     } LibraryCalls[] = {
505       { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
506       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
507       { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
508     };
509 
510     for (const auto &LC : LibraryCalls) {
511       setLibcallName(LC.Op, LC.Name);
512       setLibcallCallingConv(LC.Op, LC.CC);
513     }
514   }
515 
516   if (Subtarget->isThumb1Only())
517     addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
518   else
519     addRegisterClass(MVT::i32, &ARM::GPRRegClass);
520 
521   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
522       !Subtarget->isThumb1Only()) {
523     addRegisterClass(MVT::f32, &ARM::SPRRegClass);
524     addRegisterClass(MVT::f64, &ARM::DPRRegClass);
525   }
526 
527   for (MVT VT : MVT::vector_valuetypes()) {
528     for (MVT InnerVT : MVT::vector_valuetypes()) {
529       setTruncStoreAction(VT, InnerVT, Expand);
530       setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
531       setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
532       setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
533     }
534 
535     setOperationAction(ISD::MULHS, VT, Expand);
536     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
537     setOperationAction(ISD::MULHU, VT, Expand);
538     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
539 
540     setOperationAction(ISD::BSWAP, VT, Expand);
541   }
542 
543   setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
544   setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
545 
546   setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
547   setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);
548 
549   if (Subtarget->hasNEON()) {
550     addDRTypeForNEON(MVT::v2f32);
551     addDRTypeForNEON(MVT::v8i8);
552     addDRTypeForNEON(MVT::v4i16);
553     addDRTypeForNEON(MVT::v2i32);
554     addDRTypeForNEON(MVT::v1i64);
555 
556     addQRTypeForNEON(MVT::v4f32);
557     addQRTypeForNEON(MVT::v2f64);
558     addQRTypeForNEON(MVT::v16i8);
559     addQRTypeForNEON(MVT::v8i16);
560     addQRTypeForNEON(MVT::v4i32);
561     addQRTypeForNEON(MVT::v2i64);
562 
563     // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
564     // neither Neon nor VFP support any arithmetic operations on it.
565     // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively
566     // supported for v4f32.
567     setOperationAction(ISD::FADD, MVT::v2f64, Expand);
568     setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
569     setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
570     // FIXME: Code duplication: FDIV and FREM are expanded always, see
571     // ARMTargetLowering::addTypeForNEON method for details.
572     setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
573     setOperationAction(ISD::FREM, MVT::v2f64, Expand);
574     // FIXME: Create unittest.
575     // In another words, find a way when "copysign" appears in DAG with vector
576     // operands.
577     setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
578     // FIXME: Code duplication: SETCC has custom operation action, see
579     // ARMTargetLowering::addTypeForNEON method for details.
580     setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
581     // FIXME: Create unittest for FNEG and for FABS.
582     setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
583     setOperationAction(ISD::FABS, MVT::v2f64, Expand);
584     setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
585     setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
586     setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
587     setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
588     setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
589     setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
590     setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
591     setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
592     setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
593     // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
594     setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
595     setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
596     setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
597     setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
598     setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
599     setOperationAction(ISD::FMA, MVT::v2f64, Expand);
600 
601     setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
602     setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
603     setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
604     setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
605     setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
606     setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
607     setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
608     setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
609     setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
610     setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
611     setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
612     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
613     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
614     setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
615 
616     // Mark v2f32 intrinsics.
617     setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
618     setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
619     setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
620     setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
621     setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
622     setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
623     setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
624     setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
625     setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
626     setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
627     setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
628     setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
629     setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
630     setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
631 
632     // Neon does not support some operations on v1i64 and v2i64 types.
633     setOperationAction(ISD::MUL, MVT::v1i64, Expand);
634     // Custom handling for some quad-vector types to detect VMULL.
635     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
636     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
637     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
638     // Custom handling for some vector types to avoid expensive expansions
639     setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
640     setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
641     setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
642     setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
643     // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
644     // a destination type that is wider than the source, and nor does
645     // it have a FP_TO_[SU]INT instruction with a narrower destination than
646     // source.
647     setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
648     setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
649     setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
650     setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
651 
652     setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
653     setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);
654 
655     // NEON does not have single instruction CTPOP for vectors with element
656     // types wider than 8-bits.  However, custom lowering can leverage the
657     // v8i8/v16i8 vcnt instruction.
658     setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
659     setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
660     setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
661     setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
662     setOperationAction(ISD::CTPOP,      MVT::v1i64, Expand);
663     setOperationAction(ISD::CTPOP,      MVT::v2i64, Expand);
664 
665     setOperationAction(ISD::CTLZ,       MVT::v1i64, Expand);
666     setOperationAction(ISD::CTLZ,       MVT::v2i64, Expand);
667 
668     // NEON does not have single instruction CTTZ for vectors.
669     setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
670     setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
671     setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
672     setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
673 
674     setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
675     setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
676     setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
677     setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);
678 
679     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
680     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
681     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
682     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);
683 
684     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
685     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
686     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
687     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
688 
689     // NEON only has FMA instructions as of VFP4.
690     if (!Subtarget->hasVFP4()) {
691       setOperationAction(ISD::FMA, MVT::v2f32, Expand);
692       setOperationAction(ISD::FMA, MVT::v4f32, Expand);
693     }
694 
695     setTargetDAGCombine(ISD::INTRINSIC_VOID);
696     setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
697     setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
698     setTargetDAGCombine(ISD::SHL);
699     setTargetDAGCombine(ISD::SRL);
700     setTargetDAGCombine(ISD::SRA);
701     setTargetDAGCombine(ISD::SIGN_EXTEND);
702     setTargetDAGCombine(ISD::ZERO_EXTEND);
703     setTargetDAGCombine(ISD::ANY_EXTEND);
704     setTargetDAGCombine(ISD::BUILD_VECTOR);
705     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
706     setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
707     setTargetDAGCombine(ISD::STORE);
708     setTargetDAGCombine(ISD::FP_TO_SINT);
709     setTargetDAGCombine(ISD::FP_TO_UINT);
710     setTargetDAGCombine(ISD::FDIV);
711     setTargetDAGCombine(ISD::LOAD);
712 
713     // It is legal to extload from v4i8 to v4i16 or v4i32.
714     for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
715                    MVT::v2i32}) {
716       for (MVT VT : MVT::integer_vector_valuetypes()) {
717         setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
718         setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
719         setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
720       }
721     }
722   }
723 
724   if (Subtarget->isFPOnlySP()) {
725     // When targeting a floating-point unit with only single-precision
726     // operations, f64 is legal for the few double-precision instructions which
727     // are present However, no double-precision operations other than moves,
728     // loads and stores are provided by the hardware.
729     setOperationAction(ISD::FADD,       MVT::f64, Expand);
730     setOperationAction(ISD::FSUB,       MVT::f64, Expand);
731     setOperationAction(ISD::FMUL,       MVT::f64, Expand);
732     setOperationAction(ISD::FMA,        MVT::f64, Expand);
733     setOperationAction(ISD::FDIV,       MVT::f64, Expand);
734     setOperationAction(ISD::FREM,       MVT::f64, Expand);
735     setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
736     setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
737     setOperationAction(ISD::FNEG,       MVT::f64, Expand);
738     setOperationAction(ISD::FABS,       MVT::f64, Expand);
739     setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
740     setOperationAction(ISD::FSIN,       MVT::f64, Expand);
741     setOperationAction(ISD::FCOS,       MVT::f64, Expand);
742     setOperationAction(ISD::FPOW,       MVT::f64, Expand);
743     setOperationAction(ISD::FLOG,       MVT::f64, Expand);
744     setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
745     setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
746     setOperationAction(ISD::FEXP,       MVT::f64, Expand);
747     setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
748     setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
749     setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
750     setOperationAction(ISD::FRINT,      MVT::f64, Expand);
751     setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
752     setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
753     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
754     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
755     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
756     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
757     setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
758     setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
759     setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
760     setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
761   }
762 
763   computeRegisterProperties(Subtarget->getRegisterInfo());
764 
765   // ARM does not have floating-point extending loads.
766   for (MVT VT : MVT::fp_valuetypes()) {
767     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
768     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
769   }
770 
771   // ... or truncating stores
772   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
773   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
774   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
775 
776   // ARM does not have i1 sign extending load.
777   for (MVT VT : MVT::integer_valuetypes())
778     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
779 
780   // ARM supports all 4 flavors of integer indexed load / store.
781   if (!Subtarget->isThumb1Only()) {
782     for (unsigned im = (unsigned)ISD::PRE_INC;
783          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
784       setIndexedLoadAction(im,  MVT::i1,  Legal);
785       setIndexedLoadAction(im,  MVT::i8,  Legal);
786       setIndexedLoadAction(im,  MVT::i16, Legal);
787       setIndexedLoadAction(im,  MVT::i32, Legal);
788       setIndexedStoreAction(im, MVT::i1,  Legal);
789       setIndexedStoreAction(im, MVT::i8,  Legal);
790       setIndexedStoreAction(im, MVT::i16, Legal);
791       setIndexedStoreAction(im, MVT::i32, Legal);
792     }
793   } else {
794     // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
795     setIndexedLoadAction(ISD::POST_INC, MVT::i32,  Legal);
796     setIndexedStoreAction(ISD::POST_INC, MVT::i32,  Legal);
797   }
798 
799   setOperationAction(ISD::SADDO, MVT::i32, Custom);
800   setOperationAction(ISD::UADDO, MVT::i32, Custom);
801   setOperationAction(ISD::SSUBO, MVT::i32, Custom);
802   setOperationAction(ISD::USUBO, MVT::i32, Custom);
803 
804   // i64 operation support.
805   setOperationAction(ISD::MUL,     MVT::i64, Expand);
806   setOperationAction(ISD::MULHU,   MVT::i32, Expand);
807   if (Subtarget->isThumb1Only()) {
808     setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
809     setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
810   }
811   if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
812       || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
813     setOperationAction(ISD::MULHS, MVT::i32, Expand);
814 
815   setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
816   setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
817   setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
818   setOperationAction(ISD::SRL,       MVT::i64, Custom);
819   setOperationAction(ISD::SRA,       MVT::i64, Custom);
820   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
821 
822   setOperationAction(ISD::ADDC,      MVT::i32, Custom);
823   setOperationAction(ISD::ADDE,      MVT::i32, Custom);
824   setOperationAction(ISD::SUBC,      MVT::i32, Custom);
825   setOperationAction(ISD::SUBE,      MVT::i32, Custom);
826 
827   if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
828     setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
829 
830   // ARM does not have ROTL.
831   setOperationAction(ISD::ROTL, MVT::i32, Expand);
832   for (MVT VT : MVT::vector_valuetypes()) {
833     setOperationAction(ISD::ROTL, VT, Expand);
834     setOperationAction(ISD::ROTR, VT, Expand);
835   }
836   setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
837   setOperationAction(ISD::CTPOP, MVT::i32, Expand);
838   if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
839     setOperationAction(ISD::CTLZ, MVT::i32, Expand);
840 
841   // @llvm.readcyclecounter requires the Performance Monitors extension.
842   // Default to the 0 expansion on unsupported platforms.
843   // FIXME: Technically there are older ARM CPUs that have
844   // implementation-specific ways of obtaining this information.
845   if (Subtarget->hasPerfMon())
846     setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
847 
848   // Only ARMv6 has BSWAP.
849   if (!Subtarget->hasV6Ops())
850     setOperationAction(ISD::BSWAP, MVT::i32, Expand);
851 
852   bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
853                                         : Subtarget->hasDivideInARMMode();
854   if (!hasDivide) {
855     // These are expanded into libcalls if the cpu doesn't have HW divider.
856     setOperationAction(ISD::SDIV,  MVT::i32, LibCall);
857     setOperationAction(ISD::UDIV,  MVT::i32, LibCall);
858   }
859 
860   if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
861     setOperationAction(ISD::SDIV, MVT::i32, Custom);
862     setOperationAction(ISD::UDIV, MVT::i32, Custom);
863 
864     setOperationAction(ISD::SDIV, MVT::i64, Custom);
865     setOperationAction(ISD::UDIV, MVT::i64, Custom);
866   }
867 
868   setOperationAction(ISD::SREM,  MVT::i32, Expand);
869   setOperationAction(ISD::UREM,  MVT::i32, Expand);
870 
871   // Register based DivRem for AEABI (RTABI 4.2)
872   if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
873       Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
874       Subtarget->isTargetWindows()) {
875     setOperationAction(ISD::SREM, MVT::i64, Custom);
876     setOperationAction(ISD::UREM, MVT::i64, Custom);
877     HasStandaloneRem = false;
878 
879     if (Subtarget->isTargetWindows()) {
880       const struct {
881         const RTLIB::Libcall Op;
882         const char * const Name;
883         const CallingConv::ID CC;
884       } LibraryCalls[] = {
885         { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
886         { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
887         { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
888         { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },
889 
890         { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
891         { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
892         { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
893         { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
894       };
895 
896       for (const auto &LC : LibraryCalls) {
897         setLibcallName(LC.Op, LC.Name);
898         setLibcallCallingConv(LC.Op, LC.CC);
899       }
900     } else {
901       const struct {
902         const RTLIB::Libcall Op;
903         const char * const Name;
904         const CallingConv::ID CC;
905       } LibraryCalls[] = {
906         { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
907         { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
908         { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
909         { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },
910 
911         { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
912         { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
913         { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
914         { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS },
915       };
916 
917       for (const auto &LC : LibraryCalls) {
918         setLibcallName(LC.Op, LC.Name);
919         setLibcallCallingConv(LC.Op, LC.CC);
920       }
921     }
922 
923     setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
924     setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
925     setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
926     setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
927   } else {
928     setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
929     setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
930   }
931 
932   if (Subtarget->isTargetWindows() && Subtarget->getTargetTriple().isOSMSVCRT())
933     for (auto &VT : {MVT::f32, MVT::f64})
934       setOperationAction(ISD::FPOWI, VT, Custom);
935 
936   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
937   setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
938   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
939   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
940 
941   setOperationAction(ISD::TRAP, MVT::Other, Legal);
942 
943   // Use the default implementation.
944   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
945   setOperationAction(ISD::VAARG,              MVT::Other, Expand);
946   setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
947   setOperationAction(ISD::VAEND,              MVT::Other, Expand);
948   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
949   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
950 
951   if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
952     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
953   else
954     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
955 
956   // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
957   // the default expansion.
958   InsertFencesForAtomic = false;
959   if (Subtarget->hasAnyDataBarrier() &&
960       (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
961     // ATOMIC_FENCE needs custom lowering; the others should have been expanded
962     // to ldrex/strex loops already.
963     setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
964     if (!Subtarget->isThumb() || !Subtarget->isMClass())
965       setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Custom);
966 
967     // On v8, we have particularly efficient implementations of atomic fences
968     // if they can be combined with nearby atomic loads and stores.
969     if (!Subtarget->hasV8Ops() || getTargetMachine().getOptLevel() == 0) {
970       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
971       InsertFencesForAtomic = true;
972     }
973   } else {
974     // If there's anything we can use as a barrier, go through custom lowering
975     // for ATOMIC_FENCE.
976     // If target has DMB in thumb, Fences can be inserted.
977     if (Subtarget->hasDataBarrier())
978       InsertFencesForAtomic = true;
979 
980     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
981                        Subtarget->hasAnyDataBarrier() ? Custom : Expand);
982 
983     // Set them all for expansion, which will force libcalls.
984     setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
985     setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
986     setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
987     setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
988     setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
989     setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
990     setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
991     setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
992     setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
993     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
994     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
995     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
996     // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
997     // Unordered/Monotonic case.
998     if (!InsertFencesForAtomic) {
999       setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1000       setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1001     }
1002   }
1003 
1004   setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);
1005 
1006   // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
1007   if (!Subtarget->hasV6Ops()) {
1008     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1009     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
1010   }
1011   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1012 
1013   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
1014       !Subtarget->isThumb1Only()) {
1015     // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
1016     // iff target supports vfp2.
1017     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1018     setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
1019   }
1020 
1021   // We want to custom lower some of our intrinsics.
1022   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1023   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
1024   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
1025   setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
1026   if (Subtarget->useSjLjEH())
1027     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
1028 
1029   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
1030   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
1031   setOperationAction(ISD::SETCC,     MVT::f64, Expand);
1032   setOperationAction(ISD::SELECT,    MVT::i32, Custom);
1033   setOperationAction(ISD::SELECT,    MVT::f32, Custom);
1034   setOperationAction(ISD::SELECT,    MVT::f64, Custom);
1035   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1036   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1037   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1038 
1039   // Thumb-1 cannot currently select ARMISD::SUBE.
1040   if (!Subtarget->isThumb1Only())
1041     setOperationAction(ISD::SETCCE, MVT::i32, Custom);
1042 
1043   setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
1044   setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
1045   setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
1046   setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
1047   setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
1048 
1049   // We don't support sin/cos/fmod/copysign/pow
1050   setOperationAction(ISD::FSIN,      MVT::f64, Expand);
1051   setOperationAction(ISD::FSIN,      MVT::f32, Expand);
1052   setOperationAction(ISD::FCOS,      MVT::f32, Expand);
1053   setOperationAction(ISD::FCOS,      MVT::f64, Expand);
1054   setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
1055   setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
1056   setOperationAction(ISD::FREM,      MVT::f64, Expand);
1057   setOperationAction(ISD::FREM,      MVT::f32, Expand);
1058   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
1059       !Subtarget->isThumb1Only()) {
1060     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
1061     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
1062   }
1063   setOperationAction(ISD::FPOW,      MVT::f64, Expand);
1064   setOperationAction(ISD::FPOW,      MVT::f32, Expand);
1065 
1066   if (!Subtarget->hasVFP4()) {
1067     setOperationAction(ISD::FMA, MVT::f64, Expand);
1068     setOperationAction(ISD::FMA, MVT::f32, Expand);
1069   }
1070 
1071   // Various VFP goodness
1072   if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
1073     // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
1074     if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
1075       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1076       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1077     }
1078 
1079     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
1080     if (!Subtarget->hasFP16()) {
1081       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1082       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1083     }
1084   }
1085 
1086   // Combine sin / cos into one node or libcall if possible.
1087   if (Subtarget->hasSinCos()) {
1088     setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1089     setLibcallName(RTLIB::SINCOS_F64, "sincos");
1090     if (Subtarget->isTargetWatchABI()) {
1091       setLibcallCallingConv(RTLIB::SINCOS_F32, CallingConv::ARM_AAPCS_VFP);
1092       setLibcallCallingConv(RTLIB::SINCOS_F64, CallingConv::ARM_AAPCS_VFP);
1093     }
1094     if (Subtarget->isTargetIOS() || Subtarget->isTargetWatchOS()) {
1095       // For iOS, we don't want to the normal expansion of a libcall to
1096       // sincos. We want to issue a libcall to __sincos_stret.
1097       setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1098       setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1099     }
1100   }
1101 
1102   // FP-ARMv8 implements a lot of rounding-like FP operations.
1103   if (Subtarget->hasFPARMv8()) {
1104     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1105     setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1106     setOperationAction(ISD::FROUND, MVT::f32, Legal);
1107     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1108     setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1109     setOperationAction(ISD::FRINT, MVT::f32, Legal);
1110     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1111     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1112     setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
1113     setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
1114     setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
1115     setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
1116 
1117     if (!Subtarget->isFPOnlySP()) {
1118       setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1119       setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1120       setOperationAction(ISD::FROUND, MVT::f64, Legal);
1121       setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1122       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1123       setOperationAction(ISD::FRINT, MVT::f64, Legal);
1124       setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1125       setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1126     }
1127   }
1128 
1129   if (Subtarget->hasNEON()) {
1130     // vmin and vmax aren't available in a scalar form, so we use
1131     // a NEON instruction with an undef lane instead.
1132     setOperationAction(ISD::FMINNAN, MVT::f32, Legal);
1133     setOperationAction(ISD::FMAXNAN, MVT::f32, Legal);
1134     setOperationAction(ISD::FMINNAN, MVT::v2f32, Legal);
1135     setOperationAction(ISD::FMAXNAN, MVT::v2f32, Legal);
1136     setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal);
1137     setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal);
1138   }
1139 
1140   // We have target-specific dag combine patterns for the following nodes:
1141   // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
1142   setTargetDAGCombine(ISD::ADD);
1143   setTargetDAGCombine(ISD::SUB);
1144   setTargetDAGCombine(ISD::MUL);
1145   setTargetDAGCombine(ISD::AND);
1146   setTargetDAGCombine(ISD::OR);
1147   setTargetDAGCombine(ISD::XOR);
1148 
1149   if (Subtarget->hasV6Ops())
1150     setTargetDAGCombine(ISD::SRL);
1151 
1152   setStackPointerRegisterToSaveRestore(ARM::SP);
1153 
1154   if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
1155       !Subtarget->hasVFP2())
1156     setSchedulingPreference(Sched::RegPressure);
1157   else
1158     setSchedulingPreference(Sched::Hybrid);
1159 
1160   //// temporary - rewrite interface to use type
1161   MaxStoresPerMemset = 8;
1162   MaxStoresPerMemsetOptSize = 4;
1163   MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
1164   MaxStoresPerMemcpyOptSize = 2;
1165   MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
1166   MaxStoresPerMemmoveOptSize = 2;
1167 
1168   // On ARM arguments smaller than 4 bytes are extended, so all arguments
1169   // are at least 4 bytes aligned.
1170   setMinStackArgumentAlignment(4);
1171 
1172   // Prefer likely predicted branches to selects on out-of-order cores.
1173   PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
1174 
1175   setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
1176 }
1177 
1178 bool ARMTargetLowering::useSoftFloat() const {
1179   return Subtarget->useSoftFloat();
1180 }
1181 
1182 // FIXME: It might make sense to define the representative register class as the
1183 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is
1184 // a super-register of SPR, and DPR is a superset if DPR_VFP2. Consequently,
1185 // SPR's representative would be DPR_VFP2. This should work well if register
1186 // pressure tracking were modified such that a register use would increment the
1187 // pressure of the register class's representative and all of it's super
1188 // classes' representatives transitively. We have not implemented this because
1189 // of the difficulty prior to coalescing of modeling operand register classes
1190 // due to the common occurrence of cross class copies and subregister insertions
1191 // and extractions.
1192 std::pair<const TargetRegisterClass *, uint8_t>
1193 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
1194                                            MVT VT) const {
1195   const TargetRegisterClass *RRC = nullptr;
1196   uint8_t Cost = 1;
1197   switch (VT.SimpleTy) {
1198   default:
1199     return TargetLowering::findRepresentativeClass(TRI, VT);
1200   // Use DPR as representative register class for all floating point
1201   // and vector types. Since there are 32 SPR registers and 32 DPR registers so
1202   // the cost is 1 for both f32 and f64.
1203   case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
1204   case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
1205     RRC = &ARM::DPRRegClass;
1206     // When NEON is used for SP, only half of the register file is available
1207     // because operations that define both SP and DP results will be constrained
1208     // to the VFP2 class (D0-D15). We currently model this constraint prior to
1209     // coalescing by double-counting the SP regs. See the FIXME above.
1210     if (Subtarget->useNEONForSinglePrecisionFP())
1211       Cost = 2;
1212     break;
1213   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1214   case MVT::v4f32: case MVT::v2f64:
1215     RRC = &ARM::DPRRegClass;
1216     Cost = 2;
1217     break;
1218   case MVT::v4i64:
1219     RRC = &ARM::DPRRegClass;
1220     Cost = 4;
1221     break;
1222   case MVT::v8i64:
1223     RRC = &ARM::DPRRegClass;
1224     Cost = 8;
1225     break;
1226   }
1227   return std::make_pair(RRC, Cost);
1228 }
1229 
1230 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
1231   switch ((ARMISD::NodeType)Opcode) {
1232   case ARMISD::FIRST_NUMBER:  break;
1233   case ARMISD::Wrapper:       return "ARMISD::Wrapper";
1234   case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
1235   case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
1236   case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
1237   case ARMISD::CALL:          return "ARMISD::CALL";
1238   case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
1239   case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
1240   case ARMISD::BRCOND:        return "ARMISD::BRCOND";
1241   case ARMISD::BR_JT:         return "ARMISD::BR_JT";
1242   case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
1243   case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
1244   case ARMISD::INTRET_FLAG:   return "ARMISD::INTRET_FLAG";
1245   case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
1246   case ARMISD::CMP:           return "ARMISD::CMP";
1247   case ARMISD::CMN:           return "ARMISD::CMN";
1248   case ARMISD::CMPZ:          return "ARMISD::CMPZ";
1249   case ARMISD::CMPFP:         return "ARMISD::CMPFP";
1250   case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
1251   case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
1252   case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
1253 
1254   case ARMISD::CMOV:          return "ARMISD::CMOV";
1255 
1256   case ARMISD::SSAT:          return "ARMISD::SSAT";
1257 
1258   case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
1259   case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
1260   case ARMISD::RRX:           return "ARMISD::RRX";
1261 
1262   case ARMISD::ADDC:          return "ARMISD::ADDC";
1263   case ARMISD::ADDE:          return "ARMISD::ADDE";
1264   case ARMISD::SUBC:          return "ARMISD::SUBC";
1265   case ARMISD::SUBE:          return "ARMISD::SUBE";
1266 
1267   case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
1268   case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";
1269 
1270   case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
1271   case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
1272   case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";
1273 
1274   case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";
1275 
1276   case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
1277 
1278   case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";
1279 
1280   case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
1281 
1282   case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";
1283 
1284   case ARMISD::WIN__CHKSTK:   return "ARMISD::WIN__CHKSTK";
1285   case ARMISD::WIN__DBZCHK:   return "ARMISD::WIN__DBZCHK";
1286 
1287   case ARMISD::VCEQ:          return "ARMISD::VCEQ";
1288   case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
1289   case ARMISD::VCGE:          return "ARMISD::VCGE";
1290   case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
1291   case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
1292   case ARMISD::VCGEU:         return "ARMISD::VCGEU";
1293   case ARMISD::VCGT:          return "ARMISD::VCGT";
1294   case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
1295   case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
1296   case ARMISD::VCGTU:         return "ARMISD::VCGTU";
1297   case ARMISD::VTST:          return "ARMISD::VTST";
1298 
1299   case ARMISD::VSHL:          return "ARMISD::VSHL";
1300   case ARMISD::VSHRs:         return "ARMISD::VSHRs";
1301   case ARMISD::VSHRu:         return "ARMISD::VSHRu";
1302   case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
1303   case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
1304   case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
1305   case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
1306   case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
1307   case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
1308   case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
1309   case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
1310   case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
1311   case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
1312   case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
1313   case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
1314   case ARMISD::VSLI:          return "ARMISD::VSLI";
1315   case ARMISD::VSRI:          return "ARMISD::VSRI";
1316   case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
1317   case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
1318   case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
1319   case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
1320   case ARMISD::VMOVFPIMM:     return "ARMISD::VMOVFPIMM";
1321   case ARMISD::VDUP:          return "ARMISD::VDUP";
1322   case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
1323   case ARMISD::VEXT:          return "ARMISD::VEXT";
1324   case ARMISD::VREV64:        return "ARMISD::VREV64";
1325   case ARMISD::VREV32:        return "ARMISD::VREV32";
1326   case ARMISD::VREV16:        return "ARMISD::VREV16";
1327   case ARMISD::VZIP:          return "ARMISD::VZIP";
1328   case ARMISD::VUZP:          return "ARMISD::VUZP";
1329   case ARMISD::VTRN:          return "ARMISD::VTRN";
1330   case ARMISD::VTBL1:         return "ARMISD::VTBL1";
1331   case ARMISD::VTBL2:         return "ARMISD::VTBL2";
1332   case ARMISD::VMULLs:        return "ARMISD::VMULLs";
1333   case ARMISD::VMULLu:        return "ARMISD::VMULLu";
1334   case ARMISD::UMAAL:         return "ARMISD::UMAAL";
1335   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
1336   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
1337   case ARMISD::SMLALBB:       return "ARMISD::SMLALBB";
1338   case ARMISD::SMLALBT:       return "ARMISD::SMLALBT";
1339   case ARMISD::SMLALTB:       return "ARMISD::SMLALTB";
1340   case ARMISD::SMLALTT:       return "ARMISD::SMLALTT";
1341   case ARMISD::SMULWB:        return "ARMISD::SMULWB";
1342   case ARMISD::SMULWT:        return "ARMISD::SMULWT";
1343   case ARMISD::SMLALD:        return "ARMISD::SMLALD";
1344   case ARMISD::SMLALDX:       return "ARMISD::SMLALDX";
1345   case ARMISD::SMLSLD:        return "ARMISD::SMLSLD";
1346   case ARMISD::SMLSLDX:       return "ARMISD::SMLSLDX";
1347   case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
1348   case ARMISD::BFI:           return "ARMISD::BFI";
1349   case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
1350   case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
1351   case ARMISD::VBSL:          return "ARMISD::VBSL";
1352   case ARMISD::MEMCPY:        return "ARMISD::MEMCPY";
1353   case ARMISD::VLD1DUP:       return "ARMISD::VLD1DUP";
1354   case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
1355   case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
1356   case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
1357   case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
1358   case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
1359   case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
1360   case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
1361   case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
1362   case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
1363   case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
1364   case ARMISD::VLD1DUP_UPD:   return "ARMISD::VLD1DUP_UPD";
1365   case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
1366   case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
1367   case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
1368   case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
1369   case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
1370   case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
1371   case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
1372   case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
1373   case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
1374   case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
1375   }
1376   return nullptr;
1377 }
1378 
1379 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1380                                           EVT VT) const {
1381   if (!VT.isVector())
1382     return getPointerTy(DL);
1383   return VT.changeVectorElementTypeToInteger();
1384 }
1385 
1386 /// getRegClassFor - Return the register class that should be used for the
1387 /// specified value type.
1388 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
1389   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
1390   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
1391   // load / store 4 to 8 consecutive D registers.
1392   if (Subtarget->hasNEON()) {
1393     if (VT == MVT::v4i64)
1394       return &ARM::QQPRRegClass;
1395     if (VT == MVT::v8i64)
1396       return &ARM::QQQQPRRegClass;
1397   }
1398   return TargetLowering::getRegClassFor(VT);
1399 }
1400 
1401 // memcpy, and other memory intrinsics, typically tries to use LDM/STM if the
1402 // source/dest is aligned and the copy size is large enough. We therefore want
1403 // to align such objects passed to memory intrinsics.
1404 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
1405                                                unsigned &PrefAlign) const {
1406   if (!isa<MemIntrinsic>(CI))
1407     return false;
1408   MinSize = 8;
1409   // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
1410   // cycle faster than 4-byte aligned LDM.
1411   PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
1412   return true;
1413 }
1414 
1415 // Create a fast isel object.
1416 FastISel *
1417 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
1418                                   const TargetLibraryInfo *libInfo) const {
1419   return ARM::createFastISel(funcInfo, libInfo);
1420 }
1421 
1422 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
1423   unsigned NumVals = N->getNumValues();
1424   if (!NumVals)
1425     return Sched::RegPressure;
1426 
1427   for (unsigned i = 0; i != NumVals; ++i) {
1428     EVT VT = N->getValueType(i);
1429     if (VT == MVT::Glue || VT == MVT::Other)
1430       continue;
1431     if (VT.isFloatingPoint() || VT.isVector())
1432       return Sched::ILP;
1433   }
1434 
1435   if (!N->isMachineOpcode())
1436     return Sched::RegPressure;
1437 
1438   // Load are scheduled for latency even if there instruction itinerary
1439   // is not available.
1440   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1441   const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1442 
1443   if (MCID.getNumDefs() == 0)
1444     return Sched::RegPressure;
1445   if (!Itins->isEmpty() &&
1446       Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
1447     return Sched::ILP;
1448 
1449   return Sched::RegPressure;
1450 }
1451 
1452 //===----------------------------------------------------------------------===//
1453 // Lowering Code
1454 //===----------------------------------------------------------------------===//
1455 
1456 static bool isSRL16(const SDValue &Op) {
1457   if (Op.getOpcode() != ISD::SRL)
1458     return false;
1459   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1460     return Const->getZExtValue() == 16;
1461   return false;
1462 }
1463 
1464 static bool isSRA16(const SDValue &Op) {
1465   if (Op.getOpcode() != ISD::SRA)
1466     return false;
1467   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1468     return Const->getZExtValue() == 16;
1469   return false;
1470 }
1471 
1472 static bool isSHL16(const SDValue &Op) {
1473   if (Op.getOpcode() != ISD::SHL)
1474     return false;
1475   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1476     return Const->getZExtValue() == 16;
1477   return false;
1478 }
1479 
1480 // Check for a signed 16-bit value. We special case SRA because it makes it
1481 // more simple when also looking for SRAs that aren't sign extending a
1482 // smaller value. Without the check, we'd need to take extra care with
1483 // checking order for some operations.
1484 static bool isS16(const SDValue &Op, SelectionDAG &DAG) {
1485   if (isSRA16(Op))
1486     return isSHL16(Op.getOperand(0));
1487   return DAG.ComputeNumSignBits(Op) == 17;
1488 }
1489 
1490 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
1491 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
1492   switch (CC) {
1493   default: llvm_unreachable("Unknown condition code!");
1494   case ISD::SETNE:  return ARMCC::NE;
1495   case ISD::SETEQ:  return ARMCC::EQ;
1496   case ISD::SETGT:  return ARMCC::GT;
1497   case ISD::SETGE:  return ARMCC::GE;
1498   case ISD::SETLT:  return ARMCC::LT;
1499   case ISD::SETLE:  return ARMCC::LE;
1500   case ISD::SETUGT: return ARMCC::HI;
1501   case ISD::SETUGE: return ARMCC::HS;
1502   case ISD::SETULT: return ARMCC::LO;
1503   case ISD::SETULE: return ARMCC::LS;
1504   }
1505 }
1506 
1507 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
1508 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
1509                         ARMCC::CondCodes &CondCode2, bool &InvalidOnQNaN) {
1510   CondCode2 = ARMCC::AL;
1511   InvalidOnQNaN = true;
1512   switch (CC) {
1513   default: llvm_unreachable("Unknown FP condition!");
1514   case ISD::SETEQ:
1515   case ISD::SETOEQ:
1516     CondCode = ARMCC::EQ;
1517     InvalidOnQNaN = false;
1518     break;
1519   case ISD::SETGT:
1520   case ISD::SETOGT: CondCode = ARMCC::GT; break;
1521   case ISD::SETGE:
1522   case ISD::SETOGE: CondCode = ARMCC::GE; break;
1523   case ISD::SETOLT: CondCode = ARMCC::MI; break;
1524   case ISD::SETOLE: CondCode = ARMCC::LS; break;
1525   case ISD::SETONE:
1526     CondCode = ARMCC::MI;
1527     CondCode2 = ARMCC::GT;
1528     InvalidOnQNaN = false;
1529     break;
1530   case ISD::SETO:   CondCode = ARMCC::VC; break;
1531   case ISD::SETUO:  CondCode = ARMCC::VS; break;
1532   case ISD::SETUEQ:
1533     CondCode = ARMCC::EQ;
1534     CondCode2 = ARMCC::VS;
1535     InvalidOnQNaN = false;
1536     break;
1537   case ISD::SETUGT: CondCode = ARMCC::HI; break;
1538   case ISD::SETUGE: CondCode = ARMCC::PL; break;
1539   case ISD::SETLT:
1540   case ISD::SETULT: CondCode = ARMCC::LT; break;
1541   case ISD::SETLE:
1542   case ISD::SETULE: CondCode = ARMCC::LE; break;
1543   case ISD::SETNE:
1544   case ISD::SETUNE:
1545     CondCode = ARMCC::NE;
1546     InvalidOnQNaN = false;
1547     break;
1548   }
1549 }
1550 
1551 //===----------------------------------------------------------------------===//
1552 //                      Calling Convention Implementation
1553 //===----------------------------------------------------------------------===//
1554 
1555 #include "ARMGenCallingConv.inc"
1556 
1557 /// getEffectiveCallingConv - Get the effective calling convention, taking into
1558 /// account presence of floating point hardware and calling convention
1559 /// limitations, such as support for variadic functions.
1560 CallingConv::ID
1561 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
1562                                            bool isVarArg) const {
1563   switch (CC) {
1564   default:
1565     llvm_unreachable("Unsupported calling convention");
1566   case CallingConv::ARM_AAPCS:
1567   case CallingConv::ARM_APCS:
1568   case CallingConv::GHC:
1569     return CC;
1570   case CallingConv::PreserveMost:
1571     return CallingConv::PreserveMost;
1572   case CallingConv::ARM_AAPCS_VFP:
1573   case CallingConv::Swift:
1574     return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
1575   case CallingConv::C:
1576     if (!Subtarget->isAAPCS_ABI())
1577       return CallingConv::ARM_APCS;
1578     else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
1579              getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
1580              !isVarArg)
1581       return CallingConv::ARM_AAPCS_VFP;
1582     else
1583       return CallingConv::ARM_AAPCS;
1584   case CallingConv::Fast:
1585   case CallingConv::CXX_FAST_TLS:
1586     if (!Subtarget->isAAPCS_ABI()) {
1587       if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
1588         return CallingConv::Fast;
1589       return CallingConv::ARM_APCS;
1590     } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
1591       return CallingConv::ARM_AAPCS_VFP;
1592     else
1593       return CallingConv::ARM_AAPCS;
1594   }
1595 }
1596 
1597 CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1598                                                  bool isVarArg) const {
1599   return CCAssignFnForNode(CC, false, isVarArg);
1600 }
1601 
1602 CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1603                                                    bool isVarArg) const {
1604   return CCAssignFnForNode(CC, true, isVarArg);
1605 }
1606 
1607 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
1608 /// CallingConvention.
1609 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
1610                                                  bool Return,
1611                                                  bool isVarArg) const {
1612   switch (getEffectiveCallingConv(CC, isVarArg)) {
1613   default:
1614     llvm_unreachable("Unsupported calling convention");
1615   case CallingConv::ARM_APCS:
1616     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1617   case CallingConv::ARM_AAPCS:
1618     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1619   case CallingConv::ARM_AAPCS_VFP:
1620     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1621   case CallingConv::Fast:
1622     return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1623   case CallingConv::GHC:
1624     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
1625   case CallingConv::PreserveMost:
1626     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1627   }
1628 }
1629 
1630 /// LowerCallResult - Lower the result values of a call into the
1631 /// appropriate copies out of appropriate physical registers.
1632 SDValue ARMTargetLowering::LowerCallResult(
1633     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
1634     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1635     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
1636     SDValue ThisVal) const {
1637 
1638   // Assign locations to each value returned by this call.
1639   SmallVector<CCValAssign, 16> RVLocs;
1640   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1641                  *DAG.getContext());
1642   CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));
1643 
1644   // Copy all of the result registers out of their specified physreg.
1645   for (unsigned i = 0; i != RVLocs.size(); ++i) {
1646     CCValAssign VA = RVLocs[i];
1647 
1648     // Pass 'this' value directly from the argument to return value, to avoid
1649     // reg unit interference
1650     if (i == 0 && isThisReturn) {
1651       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
1652              "unexpected return calling convention register assignment");
1653       InVals.push_back(ThisVal);
1654       continue;
1655     }
1656 
1657     SDValue Val;
1658     if (VA.needsCustom()) {
1659       // Handle f64 or half of a v2f64.
1660       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
1661                                       InFlag);
1662       Chain = Lo.getValue(1);
1663       InFlag = Lo.getValue(2);
1664       VA = RVLocs[++i]; // skip ahead to next loc
1665       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
1666                                       InFlag);
1667       Chain = Hi.getValue(1);
1668       InFlag = Hi.getValue(2);
1669       if (!Subtarget->isLittle())
1670         std::swap (Lo, Hi);
1671       Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
1672 
1673       if (VA.getLocVT() == MVT::v2f64) {
1674         SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
1675         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
1676                           DAG.getConstant(0, dl, MVT::i32));
1677 
1678         VA = RVLocs[++i]; // skip ahead to next loc
1679         Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
1680         Chain = Lo.getValue(1);
1681         InFlag = Lo.getValue(2);
1682         VA = RVLocs[++i]; // skip ahead to next loc
1683         Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
1684         Chain = Hi.getValue(1);
1685         InFlag = Hi.getValue(2);
1686         if (!Subtarget->isLittle())
1687           std::swap (Lo, Hi);
1688         Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
1689         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
1690                           DAG.getConstant(1, dl, MVT::i32));
1691       }
1692     } else {
1693       Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1694                                InFlag);
1695       Chain = Val.getValue(1);
1696       InFlag = Val.getValue(2);
1697     }
1698 
1699     switch (VA.getLocInfo()) {
1700     default: llvm_unreachable("Unknown loc info!");
1701     case CCValAssign::Full: break;
1702     case CCValAssign::BCvt:
1703       Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
1704       break;
1705     }
1706 
1707     InVals.push_back(Val);
1708   }
1709 
1710   return Chain;
1711 }
1712 
1713 /// LowerMemOpCallTo - Store the argument to the stack.
1714 SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
1715                                             SDValue Arg, const SDLoc &dl,
1716                                             SelectionDAG &DAG,
1717                                             const CCValAssign &VA,
1718                                             ISD::ArgFlagsTy Flags) const {
1719   unsigned LocMemOffset = VA.getLocMemOffset();
1720   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
1721   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
1722                        StackPtr, PtrOff);
1723   return DAG.getStore(
1724       Chain, dl, Arg, PtrOff,
1725       MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
1726 }
1727 
1728 void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
1729                                          SDValue Chain, SDValue &Arg,
1730                                          RegsToPassVector &RegsToPass,
1731                                          CCValAssign &VA, CCValAssign &NextVA,
1732                                          SDValue &StackPtr,
1733                                          SmallVectorImpl<SDValue> &MemOpChains,
1734                                          ISD::ArgFlagsTy Flags) const {
1735 
1736   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
1737                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
1738   unsigned id = Subtarget->isLittle() ? 0 : 1;
1739   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
1740 
1741   if (NextVA.isRegLoc())
1742     RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
1743   else {
1744     assert(NextVA.isMemLoc());
1745     if (!StackPtr.getNode())
1746       StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
1747                                     getPointerTy(DAG.getDataLayout()));
1748 
1749     MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
1750                                            dl, DAG, NextVA,
1751                                            Flags));
1752   }
1753 }
1754 
1755 /// LowerCall - Lowering a call into a callseq_start <-
1756 /// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
1757 /// nodes.
1758 SDValue
1759 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1760                              SmallVectorImpl<SDValue> &InVals) const {
1761   SelectionDAG &DAG                     = CLI.DAG;
1762   SDLoc &dl                             = CLI.DL;
1763   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1764   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
1765   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
1766   SDValue Chain                         = CLI.Chain;
1767   SDValue Callee                        = CLI.Callee;
1768   bool &isTailCall                      = CLI.IsTailCall;
1769   CallingConv::ID CallConv              = CLI.CallConv;
1770   bool doesNotRet                       = CLI.DoesNotReturn;
1771   bool isVarArg                         = CLI.IsVarArg;
1772 
1773   MachineFunction &MF = DAG.getMachineFunction();
1774   bool isStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
1775   bool isThisReturn   = false;
1776   bool isSibCall      = false;
1777   auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
1778 
1779   // Disable tail calls if they're not supported.
1780   if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
1781     isTailCall = false;
1782 
1783   if (isTailCall) {
1784     // Check if it's really possible to do a tail call.
1785     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1786                     isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
1787                                                    Outs, OutVals, Ins, DAG);
1788     if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
1789       report_fatal_error("failed to perform tail call elimination on a call "
1790                          "site marked musttail");
1791     // We don't support GuaranteedTailCallOpt for ARM, only automatically
1792     // detected sibcalls.
1793     if (isTailCall) {
1794       ++NumTailCalls;
1795       isSibCall = true;
1796     }
1797   }
1798 
1799   // Analyze operands of the call, assigning locations to each operand.
1800   SmallVector<CCValAssign, 16> ArgLocs;
1801   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1802                  *DAG.getContext());
1803   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
1804 
1805   // Get a count of how many bytes are to be pushed on the stack.
1806   unsigned NumBytes = CCInfo.getNextStackOffset();
1807 
1808   // For tail calls, memory operands are available in our caller's stack.
1809   if (isSibCall)
1810     NumBytes = 0;
1811 
1812   // Adjust the stack pointer for the new arguments...
1813   // These operations are automatically eliminated by the prolog/epilog pass
1814   if (!isSibCall)
1815     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1816 
1817   SDValue StackPtr =
1818       DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));
1819 
1820   RegsToPassVector RegsToPass;
1821   SmallVector<SDValue, 8> MemOpChains;
1822 
1823   // Walk the register/memloc assignments, inserting copies/loads.  In the case
1824   // of tail call optimization, arguments are handled later.
1825   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1826        i != e;
1827        ++i, ++realArgIdx) {
1828     CCValAssign &VA = ArgLocs[i];
1829     SDValue Arg = OutVals[realArgIdx];
1830     ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
1831     bool isByVal = Flags.isByVal();
1832 
1833     // Promote the value if needed.
1834     switch (VA.getLocInfo()) {
1835     default: llvm_unreachable("Unknown loc info!");
1836     case CCValAssign::Full: break;
1837     case CCValAssign::SExt:
1838       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1839       break;
1840     case CCValAssign::ZExt:
1841       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1842       break;
1843     case CCValAssign::AExt:
1844       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1845       break;
1846     case CCValAssign::BCvt:
1847       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1848       break;
1849     }
1850 
1851     // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
1852     if (VA.needsCustom()) {
1853       if (VA.getLocVT() == MVT::v2f64) {
1854         SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1855                                   DAG.getConstant(0, dl, MVT::i32));
1856         SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1857                                   DAG.getConstant(1, dl, MVT::i32));
1858 
1859         PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
1860                          VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1861 
1862         VA = ArgLocs[++i]; // skip ahead to next loc
1863         if (VA.isRegLoc()) {
1864           PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
1865                            VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1866         } else {
1867           assert(VA.isMemLoc());
1868 
1869           MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
1870                                                  dl, DAG, VA, Flags));
1871         }
1872       } else {
1873         PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
1874                          StackPtr, MemOpChains, Flags);
1875       }
1876     } else if (VA.isRegLoc()) {
1877       if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
1878           Outs[0].VT == MVT::i32) {
1879         assert(VA.getLocVT() == MVT::i32 &&
1880                "unexpected calling convention register assignment");
1881         assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
1882                "unexpected use of 'returned'");
1883         isThisReturn = true;
1884       }
1885       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1886     } else if (isByVal) {
1887       assert(VA.isMemLoc());
1888       unsigned offset = 0;
1889 
1890       // True if this byval aggregate will be split between registers
1891       // and memory.
1892       unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
1893       unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
1894 
1895       if (CurByValIdx < ByValArgsCount) {
1896 
1897         unsigned RegBegin, RegEnd;
1898         CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
1899 
1900         EVT PtrVT =
1901             DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
1902         unsigned int i, j;
1903         for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
1904           SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
1905           SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
1906           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
1907                                      MachinePointerInfo(),
1908                                      DAG.InferPtrAlignment(AddArg));
1909           MemOpChains.push_back(Load.getValue(1));
1910           RegsToPass.push_back(std::make_pair(j, Load));
1911         }
1912 
1913         // If parameter size outsides register area, "offset" value
1914         // helps us to calculate stack slot for remained part properly.
1915         offset = RegEnd - RegBegin;
1916 
1917         CCInfo.nextInRegsParam();
1918       }
1919 
1920       if (Flags.getByValSize() > 4*offset) {
1921         auto PtrVT = getPointerTy(DAG.getDataLayout());
1922         unsigned LocMemOffset = VA.getLocMemOffset();
1923         SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
1924         SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
1925         SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
1926         SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
1927         SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
1928                                            MVT::i32);
1929         SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
1930                                             MVT::i32);
1931 
1932         SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
1933         SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
1934         MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
1935                                           Ops));
1936       }
1937     } else if (!isSibCall) {
1938       assert(VA.isMemLoc());
1939 
1940       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
1941                                              dl, DAG, VA, Flags));
1942     }
1943   }
1944 
1945   if (!MemOpChains.empty())
1946     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1947 
1948   // Build a sequence of copy-to-reg nodes chained together with token chain
1949   // and flag operands which copy the outgoing args into the appropriate regs.
1950   SDValue InFlag;
1951   // Tail call byval lowering might overwrite argument registers so in case of
1952   // tail call optimization the copies to registers are lowered later.
1953   if (!isTailCall)
1954     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1955       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1956                                RegsToPass[i].second, InFlag);
1957       InFlag = Chain.getValue(1);
1958     }
1959 
1960   // For tail calls lower the arguments to the 'real' stack slot.
1961   if (isTailCall) {
1962     // Force all the incoming stack arguments to be loaded from the stack
1963     // before any new outgoing arguments are stored to the stack, because the
1964     // outgoing stack slots may alias the incoming argument stack slots, and
1965     // the alias isn't otherwise explicit. This is slightly more conservative
1966     // than necessary, because it means that each store effectively depends
1967     // on every argument instead of just those arguments it would clobber.
1968 
1969     // Do not flag preceding copytoreg stuff together with the following stuff.
1970     InFlag = SDValue();
1971     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1972       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1973                                RegsToPass[i].second, InFlag);
1974       InFlag = Chain.getValue(1);
1975     }
1976     InFlag = SDValue();
1977   }
1978 
1979   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1980   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1981   // node so that legalize doesn't hack it.
1982   bool isDirect = false;
1983 
1984   const TargetMachine &TM = getTargetMachine();
1985   const Module *Mod = MF.getFunction()->getParent();
1986   const GlobalValue *GV = nullptr;
1987   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1988     GV = G->getGlobal();
1989   bool isStub =
1990       !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();
1991 
1992   bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
1993   bool isLocalARMFunc = false;
1994   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1995   auto PtrVt = getPointerTy(DAG.getDataLayout());
1996 
1997   if (Subtarget->genLongCalls()) {
1998     assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
1999            "long-calls codegen is not position independent!");
2000     // Handle a global address or an external symbol. If it's not one of
2001     // those, the target's already in a register, so we don't need to do
2002     // anything extra.
2003     if (isa<GlobalAddressSDNode>(Callee)) {
2004       // Create a constant pool entry for the callee address
2005       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2006       ARMConstantPoolValue *CPV =
2007         ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
2008 
2009       // Get the address of the callee into a register
2010       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2011       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2012       Callee = DAG.getLoad(
2013           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2014           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2015     } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
2016       const char *Sym = S->getSymbol();
2017 
2018       // Create a constant pool entry for the callee address
2019       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2020       ARMConstantPoolValue *CPV =
2021         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2022                                       ARMPCLabelIndex, 0);
2023       // Get the address of the callee into a register
2024       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2025       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2026       Callee = DAG.getLoad(
2027           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2028           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2029     }
2030   } else if (isa<GlobalAddressSDNode>(Callee)) {
2031     // If we're optimizing for minimum size and the function is called three or
2032     // more times in this block, we can improve codesize by calling indirectly
2033     // as BLXr has a 16-bit encoding.
2034     auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
2035     auto *BB = CLI.CS->getParent();
2036     bool PreferIndirect =
2037         Subtarget->isThumb() && MF.getFunction()->optForMinSize() &&
2038         count_if(GV->users(), [&BB](const User *U) {
2039           return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB;
2040         }) > 2;
2041 
2042     if (!PreferIndirect) {
2043       isDirect = true;
2044       bool isDef = GV->isStrongDefinitionForLinker();
2045 
2046       // ARM call to a local ARM function is predicable.
2047       isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
2048       // tBX takes a register source operand.
2049       if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2050         assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
2051         Callee = DAG.getNode(
2052             ARMISD::WrapperPIC, dl, PtrVt,
2053             DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
2054         Callee = DAG.getLoad(
2055             PtrVt, dl, DAG.getEntryNode(), Callee,
2056             MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2057             /* Alignment = */ 0, MachineMemOperand::MODereferenceable |
2058                                      MachineMemOperand::MOInvariant);
2059       } else if (Subtarget->isTargetCOFF()) {
2060         assert(Subtarget->isTargetWindows() &&
2061                "Windows is the only supported COFF target");
2062         unsigned TargetFlags = GV->hasDLLImportStorageClass()
2063                                    ? ARMII::MO_DLLIMPORT
2064                                    : ARMII::MO_NO_FLAG;
2065         Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0,
2066                                             TargetFlags);
2067         if (GV->hasDLLImportStorageClass())
2068           Callee =
2069               DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
2070                           DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
2071                           MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2072       } else {
2073         Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
2074       }
2075     }
2076   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2077     isDirect = true;
2078     // tBX takes a register source operand.
2079     const char *Sym = S->getSymbol();
2080     if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2081       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2082       ARMConstantPoolValue *CPV =
2083         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2084                                       ARMPCLabelIndex, 4);
2085       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2086       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2087       Callee = DAG.getLoad(
2088           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2089           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2090       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2091       Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
2092     } else {
2093       Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
2094     }
2095   }
2096 
2097   // FIXME: handle tail calls differently.
2098   unsigned CallOpc;
2099   if (Subtarget->isThumb()) {
2100     if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
2101       CallOpc = ARMISD::CALL_NOLINK;
2102     else
2103       CallOpc = ARMISD::CALL;
2104   } else {
2105     if (!isDirect && !Subtarget->hasV5TOps())
2106       CallOpc = ARMISD::CALL_NOLINK;
2107     else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
2108              // Emit regular call when code size is the priority
2109              !MF.getFunction()->optForMinSize())
2110       // "mov lr, pc; b _foo" to avoid confusing the RSP
2111       CallOpc = ARMISD::CALL_NOLINK;
2112     else
2113       CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
2114   }
2115 
2116   std::vector<SDValue> Ops;
2117   Ops.push_back(Chain);
2118   Ops.push_back(Callee);
2119 
2120   // Add argument registers to the end of the list so that they are known live
2121   // into the call.
2122   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2123     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2124                                   RegsToPass[i].second.getValueType()));
2125 
2126   // Add a register mask operand representing the call-preserved registers.
2127   if (!isTailCall) {
2128     const uint32_t *Mask;
2129     const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
2130     if (isThisReturn) {
2131       // For 'this' returns, use the R0-preserving mask if applicable
2132       Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
2133       if (!Mask) {
2134         // Set isThisReturn to false if the calling convention is not one that
2135         // allows 'returned' to be modeled in this way, so LowerCallResult does
2136         // not try to pass 'this' straight through
2137         isThisReturn = false;
2138         Mask = ARI->getCallPreservedMask(MF, CallConv);
2139       }
2140     } else
2141       Mask = ARI->getCallPreservedMask(MF, CallConv);
2142 
2143     assert(Mask && "Missing call preserved mask for calling convention");
2144     Ops.push_back(DAG.getRegisterMask(Mask));
2145   }
2146 
2147   if (InFlag.getNode())
2148     Ops.push_back(InFlag);
2149 
2150   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2151   if (isTailCall) {
2152     MF.getFrameInfo().setHasTailCall();
2153     return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
2154   }
2155 
2156   // Returns a chain and a flag for retval copy to use.
2157   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
2158   InFlag = Chain.getValue(1);
2159 
2160   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
2161                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
2162   if (!Ins.empty())
2163     InFlag = Chain.getValue(1);
2164 
2165   // Handle result values, copying them out of physregs into vregs that we
2166   // return.
2167   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2168                          InVals, isThisReturn,
2169                          isThisReturn ? OutVals[0] : SDValue());
2170 }
2171 
2172 /// HandleByVal - Every parameter *after* a byval parameter is passed
2173 /// on the stack.  Remember the next parameter register to allocate,
2174 /// and then confiscate the rest of the parameter registers to insure
2175 /// this.
2176 void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
2177                                     unsigned Align) const {
2178   // Byval (as with any stack) slots are always at least 4 byte aligned.
2179   Align = std::max(Align, 4U);
2180 
2181   unsigned Reg = State->AllocateReg(GPRArgRegs);
2182   if (!Reg)
2183     return;
2184 
2185   unsigned AlignInRegs = Align / 4;
2186   unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
2187   for (unsigned i = 0; i < Waste; ++i)
2188     Reg = State->AllocateReg(GPRArgRegs);
2189 
2190   if (!Reg)
2191     return;
2192 
2193   unsigned Excess = 4 * (ARM::R4 - Reg);
2194 
2195   // Special case when NSAA != SP and parameter size greater than size of
2196   // all remained GPR regs. In that case we can't split parameter, we must
2197   // send it to stack. We also must set NCRN to R4, so waste all
2198   // remained registers.
2199   const unsigned NSAAOffset = State->getNextStackOffset();
2200   if (NSAAOffset != 0 && Size > Excess) {
2201     while (State->AllocateReg(GPRArgRegs))
2202       ;
2203     return;
2204   }
2205 
2206   // First register for byval parameter is the first register that wasn't
2207   // allocated before this method call, so it would be "reg".
2208   // If parameter is small enough to be saved in range [reg, r4), then
2209   // the end (first after last) register would be reg + param-size-in-regs,
2210   // else parameter would be splitted between registers and stack,
2211   // end register would be r4 in this case.
2212   unsigned ByValRegBegin = Reg;
2213   unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
2214   State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
2215   // Note, first register is allocated in the beginning of function already,
2216   // allocate remained amount of registers we need.
2217   for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2218     State->AllocateReg(GPRArgRegs);
2219   // A byval parameter that is split between registers and memory needs its
2220   // size truncated here.
2221   // In the case where the entire structure fits in registers, we set the
2222   // size in memory to zero.
2223   Size = std::max<int>(Size - Excess, 0);
2224 }
2225 
2226 /// MatchingStackOffset - Return true if the given stack call argument is
2227 /// already available in the same position (relatively) of the caller's
2228 /// incoming argument stack.
2229 static
2230 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2231                          MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
2232                          const TargetInstrInfo *TII) {
2233   unsigned Bytes = Arg.getValueSizeInBits() / 8;
2234   int FI = std::numeric_limits<int>::max();
2235   if (Arg.getOpcode() == ISD::CopyFromReg) {
2236     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2237     if (!TargetRegisterInfo::isVirtualRegister(VR))
2238       return false;
2239     MachineInstr *Def = MRI->getVRegDef(VR);
2240     if (!Def)
2241       return false;
2242     if (!Flags.isByVal()) {
2243       if (!TII->isLoadFromStackSlot(*Def, FI))
2244         return false;
2245     } else {
2246       return false;
2247     }
2248   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2249     if (Flags.isByVal())
2250       // ByVal argument is passed in as a pointer but it's now being
2251       // dereferenced. e.g.
2252       // define @foo(%struct.X* %A) {
2253       //   tail call @bar(%struct.X* byval %A)
2254       // }
2255       return false;
2256     SDValue Ptr = Ld->getBasePtr();
2257     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
2258     if (!FINode)
2259       return false;
2260     FI = FINode->getIndex();
2261   } else
2262     return false;
2263 
2264   assert(FI != std::numeric_limits<int>::max());
2265   if (!MFI.isFixedObjectIndex(FI))
2266     return false;
2267   return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
2268 }
2269 
2270 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2271 /// for tail call optimization. Targets which want to do tail call
2272 /// optimization should implement this function.
2273 bool
2274 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
2275                                                      CallingConv::ID CalleeCC,
2276                                                      bool isVarArg,
2277                                                      bool isCalleeStructRet,
2278                                                      bool isCallerStructRet,
2279                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
2280                                     const SmallVectorImpl<SDValue> &OutVals,
2281                                     const SmallVectorImpl<ISD::InputArg> &Ins,
2282                                                      SelectionDAG& DAG) const {
2283   MachineFunction &MF = DAG.getMachineFunction();
2284   const Function *CallerF = MF.getFunction();
2285   CallingConv::ID CallerCC = CallerF->getCallingConv();
2286 
2287   assert(Subtarget->supportsTailCall());
2288 
2289   // Look for obvious safe cases to perform tail call optimization that do not
2290   // require ABI changes. This is what gcc calls sibcall.
2291 
2292   // Exception-handling functions need a special set of instructions to indicate
2293   // a return to the hardware. Tail-calling another function would probably
2294   // break this.
2295   if (CallerF->hasFnAttribute("interrupt"))
2296     return false;
2297 
2298   // Also avoid sibcall optimization if either caller or callee uses struct
2299   // return semantics.
2300   if (isCalleeStructRet || isCallerStructRet)
2301     return false;
2302 
2303   // Externally-defined functions with weak linkage should not be
2304   // tail-called on ARM when the OS does not support dynamic
2305   // pre-emption of symbols, as the AAELF spec requires normal calls
2306   // to undefined weak functions to be replaced with a NOP or jump to the
2307   // next instruction. The behaviour of branch instructions in this
2308   // situation (as used for tail calls) is implementation-defined, so we
2309   // cannot rely on the linker replacing the tail call with a return.
2310   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2311     const GlobalValue *GV = G->getGlobal();
2312     const Triple &TT = getTargetMachine().getTargetTriple();
2313     if (GV->hasExternalWeakLinkage() &&
2314         (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
2315       return false;
2316   }
2317 
2318   // Check that the call results are passed in the same way.
2319   LLVMContext &C = *DAG.getContext();
2320   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
2321                                   CCAssignFnForReturn(CalleeCC, isVarArg),
2322                                   CCAssignFnForReturn(CallerCC, isVarArg)))
2323     return false;
2324   // The callee has to preserve all registers the caller needs to preserve.
2325   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2326   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2327   if (CalleeCC != CallerCC) {
2328     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2329     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2330       return false;
2331   }
2332 
2333   // If Caller's vararg or byval argument has been split between registers and
2334   // stack, do not perform tail call, since part of the argument is in caller's
2335   // local frame.
2336   const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
2337   if (AFI_Caller->getArgRegsSaveSize())
2338     return false;
2339 
2340   // If the callee takes no arguments then go on to check the results of the
2341   // call.
2342   if (!Outs.empty()) {
2343     // Check if stack adjustment is needed. For now, do not do this if any
2344     // argument is passed on the stack.
2345     SmallVector<CCValAssign, 16> ArgLocs;
2346     CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
2347     CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
2348     if (CCInfo.getNextStackOffset()) {
2349       // Check if the arguments are already laid out in the right way as
2350       // the caller's fixed stack objects.
2351       MachineFrameInfo &MFI = MF.getFrameInfo();
2352       const MachineRegisterInfo *MRI = &MF.getRegInfo();
2353       const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2354       for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2355            i != e;
2356            ++i, ++realArgIdx) {
2357         CCValAssign &VA = ArgLocs[i];
2358         EVT RegVT = VA.getLocVT();
2359         SDValue Arg = OutVals[realArgIdx];
2360         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2361         if (VA.getLocInfo() == CCValAssign::Indirect)
2362           return false;
2363         if (VA.needsCustom()) {
2364           // f64 and vector types are split into multiple registers or
2365           // register/stack-slot combinations.  The types will not match
2366           // the registers; give up on memory f64 refs until we figure
2367           // out what to do about this.
2368           if (!VA.isRegLoc())
2369             return false;
2370           if (!ArgLocs[++i].isRegLoc())
2371             return false;
2372           if (RegVT == MVT::v2f64) {
2373             if (!ArgLocs[++i].isRegLoc())
2374               return false;
2375             if (!ArgLocs[++i].isRegLoc())
2376               return false;
2377           }
2378         } else if (!VA.isRegLoc()) {
2379           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
2380                                    MFI, MRI, TII))
2381             return false;
2382         }
2383       }
2384     }
2385 
2386     const MachineRegisterInfo &MRI = MF.getRegInfo();
2387     if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
2388       return false;
2389   }
2390 
2391   return true;
2392 }
2393 
2394 bool
2395 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2396                                   MachineFunction &MF, bool isVarArg,
2397                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
2398                                   LLVMContext &Context) const {
2399   SmallVector<CCValAssign, 16> RVLocs;
2400   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2401   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2402 }
2403 
2404 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
2405                                     const SDLoc &DL, SelectionDAG &DAG) {
2406   const MachineFunction &MF = DAG.getMachineFunction();
2407   const Function *F = MF.getFunction();
2408 
2409   StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
2410 
2411   // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
2412   // version of the "preferred return address". These offsets affect the return
2413   // instruction if this is a return from PL1 without hypervisor extensions.
2414   //    IRQ/FIQ: +4     "subs pc, lr, #4"
2415   //    SWI:     0      "subs pc, lr, #0"
2416   //    ABORT:   +4     "subs pc, lr, #4"
2417   //    UNDEF:   +4/+2  "subs pc, lr, #0"
2418   // UNDEF varies depending on where the exception came from ARM or Thumb
2419   // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
2420 
2421   int64_t LROffset;
2422   if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
2423       IntKind == "ABORT")
2424     LROffset = 4;
2425   else if (IntKind == "SWI" || IntKind == "UNDEF")
2426     LROffset = 0;
2427   else
2428     report_fatal_error("Unsupported interrupt attribute. If present, value "
2429                        "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2430 
2431   RetOps.insert(RetOps.begin() + 1,
2432                 DAG.getConstant(LROffset, DL, MVT::i32, false));
2433 
2434   return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
2435 }
2436 
2437 SDValue
2438 ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2439                                bool isVarArg,
2440                                const SmallVectorImpl<ISD::OutputArg> &Outs,
2441                                const SmallVectorImpl<SDValue> &OutVals,
2442                                const SDLoc &dl, SelectionDAG &DAG) const {
2443 
2444   // CCValAssign - represent the assignment of the return value to a location.
2445   SmallVector<CCValAssign, 16> RVLocs;
2446 
2447   // CCState - Info about the registers and stack slots.
2448   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2449                  *DAG.getContext());
2450 
2451   // Analyze outgoing return values.
2452   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2453 
2454   SDValue Flag;
2455   SmallVector<SDValue, 4> RetOps;
2456   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2457   bool isLittleEndian = Subtarget->isLittle();
2458 
2459   MachineFunction &MF = DAG.getMachineFunction();
2460   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2461   AFI->setReturnRegsCount(RVLocs.size());
2462 
2463   // Copy the result values into the output registers.
2464   for (unsigned i = 0, realRVLocIdx = 0;
2465        i != RVLocs.size();
2466        ++i, ++realRVLocIdx) {
2467     CCValAssign &VA = RVLocs[i];
2468     assert(VA.isRegLoc() && "Can only return in registers!");
2469 
2470     SDValue Arg = OutVals[realRVLocIdx];
2471 
2472     switch (VA.getLocInfo()) {
2473     default: llvm_unreachable("Unknown loc info!");
2474     case CCValAssign::Full: break;
2475     case CCValAssign::BCvt:
2476       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2477       break;
2478     }
2479 
2480     if (VA.needsCustom()) {
2481       if (VA.getLocVT() == MVT::v2f64) {
2482         // Extract the first half and return it in two registers.
2483         SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2484                                    DAG.getConstant(0, dl, MVT::i32));
2485         SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
2486                                        DAG.getVTList(MVT::i32, MVT::i32), Half);
2487 
2488         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2489                                  HalfGPRs.getValue(isLittleEndian ? 0 : 1),
2490                                  Flag);
2491         Flag = Chain.getValue(1);
2492         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2493         VA = RVLocs[++i]; // skip ahead to next loc
2494         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2495                                  HalfGPRs.getValue(isLittleEndian ? 1 : 0),
2496                                  Flag);
2497         Flag = Chain.getValue(1);
2498         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2499         VA = RVLocs[++i]; // skip ahead to next loc
2500 
2501         // Extract the 2nd half and fall through to handle it as an f64 value.
2502         Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2503                           DAG.getConstant(1, dl, MVT::i32));
2504       }
2505       // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
2506       // available.
2507       SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
2508                                   DAG.getVTList(MVT::i32, MVT::i32), Arg);
2509       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2510                                fmrrd.getValue(isLittleEndian ? 0 : 1),
2511                                Flag);
2512       Flag = Chain.getValue(1);
2513       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2514       VA = RVLocs[++i]; // skip ahead to next loc
2515       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2516                                fmrrd.getValue(isLittleEndian ? 1 : 0),
2517                                Flag);
2518     } else
2519       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
2520 
2521     // Guarantee that all emitted copies are
2522     // stuck together, avoiding something bad.
2523     Flag = Chain.getValue(1);
2524     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2525   }
2526   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2527   const MCPhysReg *I =
2528       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2529   if (I) {
2530     for (; *I; ++I) {
2531       if (ARM::GPRRegClass.contains(*I))
2532         RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2533       else if (ARM::DPRRegClass.contains(*I))
2534         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
2535       else
2536         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2537     }
2538   }
2539 
2540   // Update chain and glue.
2541   RetOps[0] = Chain;
2542   if (Flag.getNode())
2543     RetOps.push_back(Flag);
2544 
2545   // CPUs which aren't M-class use a special sequence to return from
2546   // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
2547   // though we use "subs pc, lr, #N").
2548   //
2549   // M-class CPUs actually use a normal return sequence with a special
2550   // (hardware-provided) value in LR, so the normal code path works.
2551   if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
2552       !Subtarget->isMClass()) {
2553     if (Subtarget->isThumb1Only())
2554       report_fatal_error("interrupt attribute is not supported in Thumb1");
2555     return LowerInterruptReturn(RetOps, dl, DAG);
2556   }
2557 
2558   return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
2559 }
2560 
2561 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2562   if (N->getNumValues() != 1)
2563     return false;
2564   if (!N->hasNUsesOfValue(1, 0))
2565     return false;
2566 
2567   SDValue TCChain = Chain;
2568   SDNode *Copy = *N->use_begin();
2569   if (Copy->getOpcode() == ISD::CopyToReg) {
2570     // If the copy has a glue operand, we conservatively assume it isn't safe to
2571     // perform a tail call.
2572     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2573       return false;
2574     TCChain = Copy->getOperand(0);
2575   } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
2576     SDNode *VMov = Copy;
2577     // f64 returned in a pair of GPRs.
2578     SmallPtrSet<SDNode*, 2> Copies;
2579     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
2580          UI != UE; ++UI) {
2581       if (UI->getOpcode() != ISD::CopyToReg)
2582         return false;
2583       Copies.insert(*UI);
2584     }
2585     if (Copies.size() > 2)
2586       return false;
2587 
2588     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
2589          UI != UE; ++UI) {
2590       SDValue UseChain = UI->getOperand(0);
2591       if (Copies.count(UseChain.getNode()))
2592         // Second CopyToReg
2593         Copy = *UI;
2594       else {
2595         // We are at the top of this chain.
2596         // If the copy has a glue operand, we conservatively assume it
2597         // isn't safe to perform a tail call.
2598         if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
2599           return false;
2600         // First CopyToReg
2601         TCChain = UseChain;
2602       }
2603     }
2604   } else if (Copy->getOpcode() == ISD::BITCAST) {
2605     // f32 returned in a single GPR.
2606     if (!Copy->hasOneUse())
2607       return false;
2608     Copy = *Copy->use_begin();
2609     if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
2610       return false;
2611     // If the copy has a glue operand, we conservatively assume it isn't safe to
2612     // perform a tail call.
2613     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2614       return false;
2615     TCChain = Copy->getOperand(0);
2616   } else {
2617     return false;
2618   }
2619 
2620   bool HasRet = false;
2621   for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2622        UI != UE; ++UI) {
2623     if (UI->getOpcode() != ARMISD::RET_FLAG &&
2624         UI->getOpcode() != ARMISD::INTRET_FLAG)
2625       return false;
2626     HasRet = true;
2627   }
2628 
2629   if (!HasRet)
2630     return false;
2631 
2632   Chain = TCChain;
2633   return true;
2634 }
2635 
2636 bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2637   if (!Subtarget->supportsTailCall())
2638     return false;
2639 
2640   auto Attr =
2641       CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
2642   if (!CI->isTailCall() || Attr.getValueAsString() == "true")
2643     return false;
2644 
2645   return true;
2646 }
2647 
2648 // Trying to write a 64 bit value so need to split into two 32 bit values first,
2649 // and pass the lower and high parts through.
2650 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
2651   SDLoc DL(Op);
2652   SDValue WriteValue = Op->getOperand(2);
2653 
2654   // This function is only supposed to be called for i64 type argument.
2655   assert(WriteValue.getValueType() == MVT::i64
2656           && "LowerWRITE_REGISTER called for non-i64 type argument.");
2657 
2658   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
2659                            DAG.getConstant(0, DL, MVT::i32));
2660   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
2661                            DAG.getConstant(1, DL, MVT::i32));
2662   SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
2663   return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
2664 }
2665 
2666 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
2667 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
2668 // one of the above mentioned nodes. It has to be wrapped because otherwise
2669 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
2670 // be used to form addressing mode. These wrapped nodes will be selected
2671 // into MOVi.
2672 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
2673   EVT PtrVT = Op.getValueType();
2674   // FIXME there is no actual debug info here
2675   SDLoc dl(Op);
2676   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2677   SDValue Res;
2678   if (CP->isMachineConstantPoolEntry())
2679     Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
2680                                     CP->getAlignment());
2681   else
2682     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
2683                                     CP->getAlignment());
2684   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
2685 }
2686 
2687 unsigned ARMTargetLowering::getJumpTableEncoding() const {
2688   return MachineJumpTableInfo::EK_Inline;
2689 }
2690 
2691 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
2692                                              SelectionDAG &DAG) const {
2693   MachineFunction &MF = DAG.getMachineFunction();
2694   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2695   unsigned ARMPCLabelIndex = 0;
2696   SDLoc DL(Op);
2697   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2698   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2699   SDValue CPAddr;
2700   bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
2701   if (!IsPositionIndependent) {
2702     CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
2703   } else {
2704     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2705     ARMPCLabelIndex = AFI->createPICLabelUId();
2706     ARMConstantPoolValue *CPV =
2707       ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
2708                                       ARMCP::CPBlockAddress, PCAdj);
2709     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2710   }
2711   CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
2712   SDValue Result = DAG.getLoad(
2713       PtrVT, DL, DAG.getEntryNode(), CPAddr,
2714       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2715   if (!IsPositionIndependent)
2716     return Result;
2717   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
2718   return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
2719 }
2720 
2721 /// \brief Convert a TLS address reference into the correct sequence of loads
2722 /// and calls to compute the variable's address for Darwin, and return an
2723 /// SDValue containing the final node.
2724 
2725 /// Darwin only has one TLS scheme which must be capable of dealing with the
2726 /// fully general situation, in the worst case. This means:
2727 ///     + "extern __thread" declaration.
2728 ///     + Defined in a possibly unknown dynamic library.
2729 ///
2730 /// The general system is that each __thread variable has a [3 x i32] descriptor
2731 /// which contains information used by the runtime to calculate the address. The
2732 /// only part of this the compiler needs to know about is the first word, which
2733 /// contains a function pointer that must be called with the address of the
2734 /// entire descriptor in "r0".
2735 ///
2736 /// Since this descriptor may be in a different unit, in general access must
2737 /// proceed along the usual ARM rules. A common sequence to produce is:
2738 ///
2739 ///     movw rT1, :lower16:_var$non_lazy_ptr
2740 ///     movt rT1, :upper16:_var$non_lazy_ptr
2741 ///     ldr r0, [rT1]
2742 ///     ldr rT2, [r0]
2743 ///     blx rT2
2744 ///     [...address now in r0...]
2745 SDValue
2746 ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
2747                                                SelectionDAG &DAG) const {
2748   assert(Subtarget->isTargetDarwin() && "TLS only supported on Darwin");
2749   SDLoc DL(Op);
2750 
2751   // First step is to get the address of the actua global symbol. This is where
2752   // the TLS descriptor lives.
2753   SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
2754 
2755   // The first entry in the descriptor is a function pointer that we must call
2756   // to obtain the address of the variable.
2757   SDValue Chain = DAG.getEntryNode();
2758   SDValue FuncTLVGet = DAG.getLoad(
2759       MVT::i32, DL, Chain, DescAddr,
2760       MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2761       /* Alignment = */ 4,
2762       MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
2763           MachineMemOperand::MOInvariant);
2764   Chain = FuncTLVGet.getValue(1);
2765 
2766   MachineFunction &F = DAG.getMachineFunction();
2767   MachineFrameInfo &MFI = F.getFrameInfo();
2768   MFI.setAdjustsStack(true);
2769 
2770   // TLS calls preserve all registers except those that absolutely must be
2771   // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
2772   // silly).
2773   auto TRI =
2774       getTargetMachine().getSubtargetImpl(*F.getFunction())->getRegisterInfo();
2775   auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
2776   const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());
2777 
2778   // Finally, we can make the call. This is just a degenerate version of a
2779   // normal AArch64 call node: r0 takes the address of the descriptor, and
2780   // returns the address of the variable in this thread.
2781   Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
2782   Chain =
2783       DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
2784                   Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
2785                   DAG.getRegisterMask(Mask), Chain.getValue(1));
2786   return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
2787 }
2788 
2789 SDValue
2790 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
2791                                                 SelectionDAG &DAG) const {
2792   assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
2793 
2794   SDValue Chain = DAG.getEntryNode();
2795   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2796   SDLoc DL(Op);
2797 
2798   // Load the current TEB (thread environment block)
2799   SDValue Ops[] = {Chain,
2800                    DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
2801                    DAG.getConstant(15, DL, MVT::i32),
2802                    DAG.getConstant(0, DL, MVT::i32),
2803                    DAG.getConstant(13, DL, MVT::i32),
2804                    DAG.getConstant(0, DL, MVT::i32),
2805                    DAG.getConstant(2, DL, MVT::i32)};
2806   SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
2807                                    DAG.getVTList(MVT::i32, MVT::Other), Ops);
2808 
2809   SDValue TEB = CurrentTEB.getValue(0);
2810   Chain = CurrentTEB.getValue(1);
2811 
2812   // Load the ThreadLocalStoragePointer from the TEB
2813   // A pointer to the TLS array is located at offset 0x2c from the TEB.
2814   SDValue TLSArray =
2815       DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
2816   TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
2817 
2818   // The pointer to the thread's TLS data area is at the TLS Index scaled by 4
2819   // offset into the TLSArray.
2820 
2821   // Load the TLS index from the C runtime
2822   SDValue TLSIndex =
2823       DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
2824   TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
2825   TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());
2826 
2827   SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
2828                               DAG.getConstant(2, DL, MVT::i32));
2829   SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
2830                             DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
2831                             MachinePointerInfo());
2832 
2833   // Get the offset of the start of the .tls section (section base)
2834   const auto *GA = cast<GlobalAddressSDNode>(Op);
2835   auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
2836   SDValue Offset = DAG.getLoad(
2837       PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
2838                                     DAG.getTargetConstantPool(CPV, PtrVT, 4)),
2839       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2840 
2841   return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
2842 }
2843 
2844 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
2845 SDValue
2846 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
2847                                                  SelectionDAG &DAG) const {
2848   SDLoc dl(GA);
2849   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2850   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
2851   MachineFunction &MF = DAG.getMachineFunction();
2852   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2853   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2854   ARMConstantPoolValue *CPV =
2855     ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
2856                                     ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
2857   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2858   Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
2859   Argument = DAG.getLoad(
2860       PtrVT, dl, DAG.getEntryNode(), Argument,
2861       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2862   SDValue Chain = Argument.getValue(1);
2863 
2864   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2865   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
2866 
2867   // call __tls_get_addr.
2868   ArgListTy Args;
2869   ArgListEntry Entry;
2870   Entry.Node = Argument;
2871   Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
2872   Args.push_back(Entry);
2873 
2874   // FIXME: is there useful debug info available here?
2875   TargetLowering::CallLoweringInfo CLI(DAG);
2876   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
2877       CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
2878       DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));
2879 
2880   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2881   return CallResult.first;
2882 }
2883 
2884 // Lower ISD::GlobalTLSAddress using the "initial exec" or
2885 // "local exec" model.
2886 SDValue
2887 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
2888                                         SelectionDAG &DAG,
2889                                         TLSModel::Model model) const {
2890   const GlobalValue *GV = GA->getGlobal();
2891   SDLoc dl(GA);
2892   SDValue Offset;
2893   SDValue Chain = DAG.getEntryNode();
2894   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2895   // Get the Thread Pointer
2896   SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
2897 
2898   if (model == TLSModel::InitialExec) {
2899     MachineFunction &MF = DAG.getMachineFunction();
2900     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2901     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2902     // Initial exec model.
2903     unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
2904     ARMConstantPoolValue *CPV =
2905       ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
2906                                       ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
2907                                       true);
2908     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2909     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
2910     Offset = DAG.getLoad(
2911         PtrVT, dl, Chain, Offset,
2912         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2913     Chain = Offset.getValue(1);
2914 
2915     SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2916     Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
2917 
2918     Offset = DAG.getLoad(
2919         PtrVT, dl, Chain, Offset,
2920         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2921   } else {
2922     // local exec model
2923     assert(model == TLSModel::LocalExec);
2924     ARMConstantPoolValue *CPV =
2925       ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
2926     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2927     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
2928     Offset = DAG.getLoad(
2929         PtrVT, dl, Chain, Offset,
2930         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2931   }
2932 
2933   // The address of the thread local variable is the add of the thread
2934   // pointer with the offset of the variable.
2935   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
2936 }
2937 
2938 SDValue
2939 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
2940   if (Subtarget->isTargetDarwin())
2941     return LowerGlobalTLSAddressDarwin(Op, DAG);
2942 
2943   if (Subtarget->isTargetWindows())
2944     return LowerGlobalTLSAddressWindows(Op, DAG);
2945 
2946   // TODO: implement the "local dynamic" model
2947   assert(Subtarget->isTargetELF() && "Only ELF implemented here");
2948   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2949   if (DAG.getTarget().Options.EmulatedTLS)
2950     return LowerToTLSEmulatedModel(GA, DAG);
2951 
2952   TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
2953 
2954   switch (model) {
2955     case TLSModel::GeneralDynamic:
2956     case TLSModel::LocalDynamic:
2957       return LowerToTLSGeneralDynamicModel(GA, DAG);
2958     case TLSModel::InitialExec:
2959     case TLSModel::LocalExec:
2960       return LowerToTLSExecModels(GA, DAG, model);
2961   }
2962   llvm_unreachable("bogus TLS model");
2963 }
2964 
2965 /// Return true if all users of V are within function F, looking through
2966 /// ConstantExprs.
2967 static bool allUsersAreInFunction(const Value *V, const Function *F) {
2968   SmallVector<const User*,4> Worklist;
2969   for (auto *U : V->users())
2970     Worklist.push_back(U);
2971   while (!Worklist.empty()) {
2972     auto *U = Worklist.pop_back_val();
2973     if (isa<ConstantExpr>(U)) {
2974       for (auto *UU : U->users())
2975         Worklist.push_back(UU);
2976       continue;
2977     }
2978 
2979     auto *I = dyn_cast<Instruction>(U);
2980     if (!I || I->getParent()->getParent() != F)
2981       return false;
2982   }
2983   return true;
2984 }
2985 
2986 /// Return true if all users of V are within some (any) function, looking through
2987 /// ConstantExprs. In other words, are there any global constant users?
2988 static bool allUsersAreInFunctions(const Value *V) {
2989   SmallVector<const User*,4> Worklist;
2990   for (auto *U : V->users())
2991     Worklist.push_back(U);
2992   while (!Worklist.empty()) {
2993     auto *U = Worklist.pop_back_val();
2994     if (isa<ConstantExpr>(U)) {
2995       for (auto *UU : U->users())
2996         Worklist.push_back(UU);
2997       continue;
2998     }
2999 
3000     if (!isa<Instruction>(U))
3001       return false;
3002   }
3003   return true;
3004 }
3005 
3006 // Return true if T is an integer, float or an array/vector of either.
3007 static bool isSimpleType(Type *T) {
3008   if (T->isIntegerTy() || T->isFloatingPointTy())
3009     return true;
3010   Type *SubT = nullptr;
3011   if (T->isArrayTy())
3012     SubT = T->getArrayElementType();
3013   else if (T->isVectorTy())
3014     SubT = T->getVectorElementType();
3015   else
3016     return false;
3017   return SubT->isIntegerTy() || SubT->isFloatingPointTy();
3018 }
3019 
3020 static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG,
3021                                      EVT PtrVT, const SDLoc &dl) {
3022   // If we're creating a pool entry for a constant global with unnamed address,
3023   // and the global is small enough, we can emit it inline into the constant pool
3024   // to save ourselves an indirection.
3025   //
3026   // This is a win if the constant is only used in one function (so it doesn't
3027   // need to be duplicated) or duplicating the constant wouldn't increase code
3028   // size (implying the constant is no larger than 4 bytes).
3029   const Function *F = DAG.getMachineFunction().getFunction();
3030 
3031   // We rely on this decision to inline being idemopotent and unrelated to the
3032   // use-site. We know that if we inline a variable at one use site, we'll
3033   // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
3034   // doesn't know about this optimization, so bail out if it's enabled else
3035   // we could decide to inline here (and thus never emit the GV) but require
3036   // the GV from fast-isel generated code.
3037   if (!EnableConstpoolPromotion ||
3038       DAG.getMachineFunction().getTarget().Options.EnableFastISel)
3039       return SDValue();
3040 
3041   auto *GVar = dyn_cast<GlobalVariable>(GV);
3042   if (!GVar || !GVar->hasInitializer() ||
3043       !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3044       !GVar->hasLocalLinkage())
3045     return SDValue();
3046 
3047   // Ensure that we don't try and inline any type that contains pointers. If
3048   // we inline a value that contains relocations, we move the relocations from
3049   // .data to .text which is not ideal.
3050   auto *Init = GVar->getInitializer();
3051   if (!isSimpleType(Init->getType()))
3052     return SDValue();
3053 
3054   // The constant islands pass can only really deal with alignment requests
3055   // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
3056   // any type wanting greater alignment requirements than 4 bytes. We also
3057   // can only promote constants that are multiples of 4 bytes in size or
3058   // are paddable to a multiple of 4. Currently we only try and pad constants
3059   // that are strings for simplicity.
3060   auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
3061   unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
3062   unsigned Align = GVar->getAlignment();
3063   unsigned RequiredPadding = 4 - (Size % 4);
3064   bool PaddingPossible =
3065     RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3066   if (!PaddingPossible || Align > 4 || Size > ConstpoolPromotionMaxSize ||
3067       Size == 0)
3068     return SDValue();
3069 
3070   unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3071   MachineFunction &MF = DAG.getMachineFunction();
3072   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3073 
3074   // We can't bloat the constant pool too much, else the ConstantIslands pass
3075   // may fail to converge. If we haven't promoted this global yet (it may have
3076   // multiple uses), and promoting it would increase the constant pool size (Sz
3077   // > 4), ensure we have space to do so up to MaxTotal.
3078   if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
3079     if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
3080         ConstpoolPromotionMaxTotal)
3081       return SDValue();
3082 
3083   // This is only valid if all users are in a single function OR it has users
3084   // in multiple functions but it no larger than a pointer. We also check if
3085   // GVar has constant (non-ConstantExpr) users. If so, it essentially has its
3086   // address taken.
3087   if (!allUsersAreInFunction(GVar, F) &&
3088       !(Size <= 4 && allUsersAreInFunctions(GVar)))
3089     return SDValue();
3090 
3091   // We're going to inline this global. Pad it out if needed.
3092   if (RequiredPadding != 4) {
3093     StringRef S = CDAInit->getAsString();
3094 
3095     SmallVector<uint8_t,16> V(S.size());
3096     std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
3097     while (RequiredPadding--)
3098       V.push_back(0);
3099     Init = ConstantDataArray::get(*DAG.getContext(), V);
3100   }
3101 
3102   auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
3103   SDValue CPAddr =
3104     DAG.getTargetConstantPool(CPVal, PtrVT, /*Align=*/4);
3105   if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
3106     AFI->markGlobalAsPromotedToConstantPool(GVar);
3107     AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
3108                                       PaddedSize - 4);
3109   }
3110   ++NumConstpoolPromoted;
3111   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3112 }
3113 
3114 static bool isReadOnly(const GlobalValue *GV) {
3115   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3116     GV = GA->getBaseObject();
3117   return (isa<GlobalVariable>(GV) && cast<GlobalVariable>(GV)->isConstant()) ||
3118          isa<Function>(GV);
3119 }
3120 
3121 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
3122                                                  SelectionDAG &DAG) const {
3123   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3124   SDLoc dl(Op);
3125   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3126   const TargetMachine &TM = getTargetMachine();
3127   bool IsRO = isReadOnly(GV);
3128 
3129   // promoteToConstantPool only if not generating XO text section
3130   if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
3131     if (SDValue V = promoteToConstantPool(GV, DAG, PtrVT, dl))
3132       return V;
3133 
3134   if (isPositionIndependent()) {
3135     bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
3136 
3137     MachineFunction &MF = DAG.getMachineFunction();
3138     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3139     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3140     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3141     SDLoc dl(Op);
3142     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
3143     ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
3144         GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
3145         UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
3146         /*AddCurrentAddress=*/UseGOT_PREL);
3147     SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3148     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3149     SDValue Result = DAG.getLoad(
3150         PtrVT, dl, DAG.getEntryNode(), CPAddr,
3151         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3152     SDValue Chain = Result.getValue(1);
3153     SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3154     Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
3155     if (UseGOT_PREL)
3156       Result =
3157           DAG.getLoad(PtrVT, dl, Chain, Result,
3158                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3159     return Result;
3160   } else if (Subtarget->isROPI() && IsRO) {
3161     // PC-relative.
3162     SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
3163     SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3164     return Result;
3165   } else if (Subtarget->isRWPI() && !IsRO) {
3166     // SB-relative.
3167     SDValue RelAddr;
3168     if (Subtarget->useMovt(DAG.getMachineFunction())) {
3169       ++NumMovwMovt;
3170       SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL);
3171       RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G);
3172     } else { // use literal pool for address constant
3173       ARMConstantPoolValue *CPV =
3174         ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
3175       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3176       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3177       RelAddr = DAG.getLoad(
3178           PtrVT, dl, DAG.getEntryNode(), CPAddr,
3179           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3180     }
3181     SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
3182     SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr);
3183     return Result;
3184   }
3185 
3186   // If we have T2 ops, we can materialize the address directly via movt/movw
3187   // pair. This is always cheaper.
3188   if (Subtarget->useMovt(DAG.getMachineFunction())) {
3189     ++NumMovwMovt;
3190     // FIXME: Once remat is capable of dealing with instructions with register
3191     // operands, expand this into two nodes.
3192     return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
3193                        DAG.getTargetGlobalAddress(GV, dl, PtrVT));
3194   } else {
3195     SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
3196     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3197     return DAG.getLoad(
3198         PtrVT, dl, DAG.getEntryNode(), CPAddr,
3199         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3200   }
3201 }
3202 
3203 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
3204                                                     SelectionDAG &DAG) const {
3205   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3206          "ROPI/RWPI not currently supported for Darwin");
3207   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3208   SDLoc dl(Op);
3209   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3210 
3211   if (Subtarget->useMovt(DAG.getMachineFunction()))
3212     ++NumMovwMovt;
3213 
3214   // FIXME: Once remat is capable of dealing with instructions with register
3215   // operands, expand this into multiple nodes
3216   unsigned Wrapper =
3217       isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;
3218 
3219   SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
3220   SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
3221 
3222   if (Subtarget->isGVIndirectSymbol(GV))
3223     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3224                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3225   return Result;
3226 }
3227 
3228 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
3229                                                      SelectionDAG &DAG) const {
3230   assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
3231   assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
3232          "Windows on ARM expects to use movw/movt");
3233   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3234          "ROPI/RWPI not currently supported for Windows");
3235 
3236   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3237   const ARMII::TOF TargetFlags =
3238     (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG);
3239   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3240   SDValue Result;
3241   SDLoc DL(Op);
3242 
3243   ++NumMovwMovt;
3244 
3245   // FIXME: Once remat is capable of dealing with instructions with register
3246   // operands, expand this into two nodes.
3247   Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
3248                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
3249                                                   TargetFlags));
3250   if (GV->hasDLLImportStorageClass())
3251     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
3252                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3253   return Result;
3254 }
3255 
3256 SDValue
3257 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
3258   SDLoc dl(Op);
3259   SDValue Val = DAG.getConstant(0, dl, MVT::i32);
3260   return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
3261                      DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
3262                      Op.getOperand(1), Val);
3263 }
3264 
3265 SDValue
3266 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
3267   SDLoc dl(Op);
3268   return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
3269                      Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
3270 }
3271 
3272 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
3273                                                       SelectionDAG &DAG) const {
3274   SDLoc dl(Op);
3275   return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
3276                      Op.getOperand(0));
3277 }
3278 
3279 SDValue
3280 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
3281                                           const ARMSubtarget *Subtarget) const {
3282   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3283   SDLoc dl(Op);
3284   switch (IntNo) {
3285   default: return SDValue();    // Don't custom lower most intrinsics.
3286   case Intrinsic::thread_pointer: {
3287     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3288     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
3289   }
3290   case Intrinsic::eh_sjlj_lsda: {
3291     MachineFunction &MF = DAG.getMachineFunction();
3292     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3293     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3294     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3295     SDValue CPAddr;
3296     bool IsPositionIndependent = isPositionIndependent();
3297     unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
3298     ARMConstantPoolValue *CPV =
3299       ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
3300                                       ARMCP::CPLSDA, PCAdj);
3301     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3302     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3303     SDValue Result = DAG.getLoad(
3304         PtrVT, dl, DAG.getEntryNode(), CPAddr,
3305         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3306 
3307     if (IsPositionIndependent) {
3308       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3309       Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
3310     }
3311     return Result;
3312   }
3313   case Intrinsic::arm_neon_vabs:
3314     return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
3315                         Op.getOperand(1));
3316   case Intrinsic::arm_neon_vmulls:
3317   case Intrinsic::arm_neon_vmullu: {
3318     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
3319       ? ARMISD::VMULLs : ARMISD::VMULLu;
3320     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3321                        Op.getOperand(1), Op.getOperand(2));
3322   }
3323   case Intrinsic::arm_neon_vminnm:
3324   case Intrinsic::arm_neon_vmaxnm: {
3325     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
3326       ? ISD::FMINNUM : ISD::FMAXNUM;
3327     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3328                        Op.getOperand(1), Op.getOperand(2));
3329   }
3330   case Intrinsic::arm_neon_vminu:
3331   case Intrinsic::arm_neon_vmaxu: {
3332     if (Op.getValueType().isFloatingPoint())
3333       return SDValue();
3334     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
3335       ? ISD::UMIN : ISD::UMAX;
3336     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3337                          Op.getOperand(1), Op.getOperand(2));
3338   }
3339   case Intrinsic::arm_neon_vmins:
3340   case Intrinsic::arm_neon_vmaxs: {
3341     // v{min,max}s is overloaded between signed integers and floats.
3342     if (!Op.getValueType().isFloatingPoint()) {
3343       unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3344         ? ISD::SMIN : ISD::SMAX;
3345       return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3346                          Op.getOperand(1), Op.getOperand(2));
3347     }
3348     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3349       ? ISD::FMINNAN : ISD::FMAXNAN;
3350     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3351                        Op.getOperand(1), Op.getOperand(2));
3352   }
3353   case Intrinsic::arm_neon_vtbl1:
3354     return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
3355                        Op.getOperand(1), Op.getOperand(2));
3356   case Intrinsic::arm_neon_vtbl2:
3357     return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
3358                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3359   }
3360 }
3361 
3362 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
3363                                  const ARMSubtarget *Subtarget) {
3364   SDLoc dl(Op);
3365   ConstantSDNode *ScopeN = cast<ConstantSDNode>(Op.getOperand(2));
3366   auto Scope = static_cast<SynchronizationScope>(ScopeN->getZExtValue());
3367   if (Scope == SynchronizationScope::SingleThread)
3368     return Op;
3369 
3370   if (!Subtarget->hasDataBarrier()) {
3371     // Some ARMv6 cpus can support data barriers with an mcr instruction.
3372     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
3373     // here.
3374     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
3375            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
3376     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
3377                        DAG.getConstant(0, dl, MVT::i32));
3378   }
3379 
3380   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
3381   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
3382   ARM_MB::MemBOpt Domain = ARM_MB::ISH;
3383   if (Subtarget->isMClass()) {
3384     // Only a full system barrier exists in the M-class architectures.
3385     Domain = ARM_MB::SY;
3386   } else if (Subtarget->preferISHSTBarriers() &&
3387              Ord == AtomicOrdering::Release) {
3388     // Swift happens to implement ISHST barriers in a way that's compatible with
3389     // Release semantics but weaker than ISH so we'd be fools not to use
3390     // it. Beware: other processors probably don't!
3391     Domain = ARM_MB::ISHST;
3392   }
3393 
3394   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
3395                      DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
3396                      DAG.getConstant(Domain, dl, MVT::i32));
3397 }
3398 
3399 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
3400                              const ARMSubtarget *Subtarget) {
3401   // ARM pre v5TE and Thumb1 does not have preload instructions.
3402   if (!(Subtarget->isThumb2() ||
3403         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
3404     // Just preserve the chain.
3405     return Op.getOperand(0);
3406 
3407   SDLoc dl(Op);
3408   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
3409   if (!isRead &&
3410       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
3411     // ARMv7 with MP extension has PLDW.
3412     return Op.getOperand(0);
3413 
3414   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3415   if (Subtarget->isThumb()) {
3416     // Invert the bits.
3417     isRead = ~isRead & 1;
3418     isData = ~isData & 1;
3419   }
3420 
3421   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
3422                      Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
3423                      DAG.getConstant(isData, dl, MVT::i32));
3424 }
3425 
3426 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
3427   MachineFunction &MF = DAG.getMachineFunction();
3428   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
3429 
3430   // vastart just stores the address of the VarArgsFrameIndex slot into the
3431   // memory location argument.
3432   SDLoc dl(Op);
3433   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
3434   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3435   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3436   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3437                       MachinePointerInfo(SV));
3438 }
3439 
3440 SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
3441                                                 CCValAssign &NextVA,
3442                                                 SDValue &Root,
3443                                                 SelectionDAG &DAG,
3444                                                 const SDLoc &dl) const {
3445   MachineFunction &MF = DAG.getMachineFunction();
3446   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3447 
3448   const TargetRegisterClass *RC;
3449   if (AFI->isThumb1OnlyFunction())
3450     RC = &ARM::tGPRRegClass;
3451   else
3452     RC = &ARM::GPRRegClass;
3453 
3454   // Transform the arguments stored in physical registers into virtual ones.
3455   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3456   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3457 
3458   SDValue ArgValue2;
3459   if (NextVA.isMemLoc()) {
3460     MachineFrameInfo &MFI = MF.getFrameInfo();
3461     int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);
3462 
3463     // Create load node to retrieve arguments from the stack.
3464     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3465     ArgValue2 = DAG.getLoad(
3466         MVT::i32, dl, Root, FIN,
3467         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3468   } else {
3469     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
3470     ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3471   }
3472   if (!Subtarget->isLittle())
3473     std::swap (ArgValue, ArgValue2);
3474   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
3475 }
3476 
3477 // The remaining GPRs hold either the beginning of variable-argument
3478 // data, or the beginning of an aggregate passed by value (usually
3479 // byval).  Either way, we allocate stack slots adjacent to the data
3480 // provided by our caller, and store the unallocated registers there.
3481 // If this is a variadic function, the va_list pointer will begin with
3482 // these values; otherwise, this reassembles a (byval) structure that
3483 // was split between registers and memory.
3484 // Return: The frame index registers were stored into.
3485 int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
3486                                       const SDLoc &dl, SDValue &Chain,
3487                                       const Value *OrigArg,
3488                                       unsigned InRegsParamRecordIdx,
3489                                       int ArgOffset, unsigned ArgSize) const {
3490   // Currently, two use-cases possible:
3491   // Case #1. Non-var-args function, and we meet first byval parameter.
3492   //          Setup first unallocated register as first byval register;
3493   //          eat all remained registers
3494   //          (these two actions are performed by HandleByVal method).
3495   //          Then, here, we initialize stack frame with
3496   //          "store-reg" instructions.
3497   // Case #2. Var-args function, that doesn't contain byval parameters.
3498   //          The same: eat all remained unallocated registers,
3499   //          initialize stack frame.
3500 
3501   MachineFunction &MF = DAG.getMachineFunction();
3502   MachineFrameInfo &MFI = MF.getFrameInfo();
3503   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3504   unsigned RBegin, REnd;
3505   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
3506     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
3507   } else {
3508     unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
3509     RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
3510     REnd = ARM::R4;
3511   }
3512 
3513   if (REnd != RBegin)
3514     ArgOffset = -4 * (ARM::R4 - RBegin);
3515 
3516   auto PtrVT = getPointerTy(DAG.getDataLayout());
3517   int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
3518   SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);
3519 
3520   SmallVector<SDValue, 4> MemOps;
3521   const TargetRegisterClass *RC =
3522       AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
3523 
3524   for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
3525     unsigned VReg = MF.addLiveIn(Reg, RC);
3526     SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
3527     SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3528                                  MachinePointerInfo(OrigArg, 4 * i));
3529     MemOps.push_back(Store);
3530     FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
3531   }
3532 
3533   if (!MemOps.empty())
3534     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3535   return FrameIndex;
3536 }
3537 
3538 // Setup stack frame, the va_list pointer will start from.
3539 void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
3540                                              const SDLoc &dl, SDValue &Chain,
3541                                              unsigned ArgOffset,
3542                                              unsigned TotalArgRegsSaveSize,
3543                                              bool ForceMutable) const {
3544   MachineFunction &MF = DAG.getMachineFunction();
3545   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3546 
3547   // Try to store any remaining integer argument regs
3548   // to their spots on the stack so that they may be loaded by dereferencing
3549   // the result of va_next.
3550   // If there is no regs to be stored, just point address after last
3551   // argument passed via stack.
3552   int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
3553                                   CCInfo.getInRegsParamsCount(),
3554                                   CCInfo.getNextStackOffset(), 4);
3555   AFI->setVarArgsFrameIndex(FrameIndex);
3556 }
3557 
3558 SDValue ARMTargetLowering::LowerFormalArguments(
3559     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3560     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3561     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3562   MachineFunction &MF = DAG.getMachineFunction();
3563   MachineFrameInfo &MFI = MF.getFrameInfo();
3564 
3565   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3566 
3567   // Assign locations to all of the incoming arguments.
3568   SmallVector<CCValAssign, 16> ArgLocs;
3569   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3570                  *DAG.getContext());
3571   CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
3572 
3573   SmallVector<SDValue, 16> ArgValues;
3574   SDValue ArgValue;
3575   Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
3576   unsigned CurArgIdx = 0;
3577 
3578   // Initially ArgRegsSaveSize is zero.
3579   // Then we increase this value each time we meet byval parameter.
3580   // We also increase this value in case of varargs function.
3581   AFI->setArgRegsSaveSize(0);
3582 
3583   // Calculate the amount of stack space that we need to allocate to store
3584   // byval and variadic arguments that are passed in registers.
3585   // We need to know this before we allocate the first byval or variadic
3586   // argument, as they will be allocated a stack slot below the CFA (Canonical
3587   // Frame Address, the stack pointer at entry to the function).
3588   unsigned ArgRegBegin = ARM::R4;
3589   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3590     if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
3591       break;
3592 
3593     CCValAssign &VA = ArgLocs[i];
3594     unsigned Index = VA.getValNo();
3595     ISD::ArgFlagsTy Flags = Ins[Index].Flags;
3596     if (!Flags.isByVal())
3597       continue;
3598 
3599     assert(VA.isMemLoc() && "unexpected byval pointer in reg");
3600     unsigned RBegin, REnd;
3601     CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
3602     ArgRegBegin = std::min(ArgRegBegin, RBegin);
3603 
3604     CCInfo.nextInRegsParam();
3605   }
3606   CCInfo.rewindByValRegsInfo();
3607 
3608   int lastInsIndex = -1;
3609   if (isVarArg && MFI.hasVAStart()) {
3610     unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
3611     if (RegIdx != array_lengthof(GPRArgRegs))
3612       ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
3613   }
3614 
3615   unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
3616   AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
3617   auto PtrVT = getPointerTy(DAG.getDataLayout());
3618 
3619   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3620     CCValAssign &VA = ArgLocs[i];
3621     if (Ins[VA.getValNo()].isOrigArg()) {
3622       std::advance(CurOrigArg,
3623                    Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
3624       CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
3625     }
3626     // Arguments stored in registers.
3627     if (VA.isRegLoc()) {
3628       EVT RegVT = VA.getLocVT();
3629 
3630       if (VA.needsCustom()) {
3631         // f64 and vector types are split up into multiple registers or
3632         // combinations of registers and stack slots.
3633         if (VA.getLocVT() == MVT::v2f64) {
3634           SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
3635                                                    Chain, DAG, dl);
3636           VA = ArgLocs[++i]; // skip ahead to next loc
3637           SDValue ArgValue2;
3638           if (VA.isMemLoc()) {
3639             int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
3640             SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3641             ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
3642                                     MachinePointerInfo::getFixedStack(
3643                                         DAG.getMachineFunction(), FI));
3644           } else {
3645             ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
3646                                              Chain, DAG, dl);
3647           }
3648           ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
3649           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3650                                  ArgValue, ArgValue1,
3651                                  DAG.getIntPtrConstant(0, dl));
3652           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3653                                  ArgValue, ArgValue2,
3654                                  DAG.getIntPtrConstant(1, dl));
3655         } else
3656           ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
3657 
3658       } else {
3659         const TargetRegisterClass *RC;
3660 
3661         if (RegVT == MVT::f32)
3662           RC = &ARM::SPRRegClass;
3663         else if (RegVT == MVT::f64)
3664           RC = &ARM::DPRRegClass;
3665         else if (RegVT == MVT::v2f64)
3666           RC = &ARM::QPRRegClass;
3667         else if (RegVT == MVT::i32)
3668           RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
3669                                            : &ARM::GPRRegClass;
3670         else
3671           llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
3672 
3673         // Transform the arguments in physical registers into virtual ones.
3674         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3675         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3676       }
3677 
3678       // If this is an 8 or 16-bit value, it is really passed promoted
3679       // to 32 bits.  Insert an assert[sz]ext to capture this, then
3680       // truncate to the right size.
3681       switch (VA.getLocInfo()) {
3682       default: llvm_unreachable("Unknown loc info!");
3683       case CCValAssign::Full: break;
3684       case CCValAssign::BCvt:
3685         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
3686         break;
3687       case CCValAssign::SExt:
3688         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3689                                DAG.getValueType(VA.getValVT()));
3690         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3691         break;
3692       case CCValAssign::ZExt:
3693         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3694                                DAG.getValueType(VA.getValVT()));
3695         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3696         break;
3697       }
3698 
3699       InVals.push_back(ArgValue);
3700 
3701     } else { // VA.isRegLoc()
3702       // sanity check
3703       assert(VA.isMemLoc());
3704       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
3705 
3706       int index = VA.getValNo();
3707 
3708       // Some Ins[] entries become multiple ArgLoc[] entries.
3709       // Process them only once.
3710       if (index != lastInsIndex)
3711         {
3712           ISD::ArgFlagsTy Flags = Ins[index].Flags;
3713           // FIXME: For now, all byval parameter objects are marked mutable.
3714           // This can be changed with more analysis.
3715           // In case of tail call optimization mark all arguments mutable.
3716           // Since they could be overwritten by lowering of arguments in case of
3717           // a tail call.
3718           if (Flags.isByVal()) {
3719             assert(Ins[index].isOrigArg() &&
3720                    "Byval arguments cannot be implicit");
3721             unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
3722 
3723             int FrameIndex = StoreByValRegs(
3724                 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
3725                 VA.getLocMemOffset(), Flags.getByValSize());
3726             InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
3727             CCInfo.nextInRegsParam();
3728           } else {
3729             unsigned FIOffset = VA.getLocMemOffset();
3730             int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
3731                                            FIOffset, true);
3732 
3733             // Create load nodes to retrieve arguments from the stack.
3734             SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3735             InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
3736                                          MachinePointerInfo::getFixedStack(
3737                                              DAG.getMachineFunction(), FI)));
3738           }
3739           lastInsIndex = index;
3740         }
3741     }
3742   }
3743 
3744   // varargs
3745   if (isVarArg && MFI.hasVAStart())
3746     VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
3747                          CCInfo.getNextStackOffset(),
3748                          TotalArgRegsSaveSize);
3749 
3750   AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
3751 
3752   return Chain;
3753 }
3754 
3755 /// isFloatingPointZero - Return true if this is +0.0.
3756 static bool isFloatingPointZero(SDValue Op) {
3757   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
3758     return CFP->getValueAPF().isPosZero();
3759   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
3760     // Maybe this has already been legalized into the constant pool?
3761     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
3762       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
3763       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
3764         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
3765           return CFP->getValueAPF().isPosZero();
3766     }
3767   } else if (Op->getOpcode() == ISD::BITCAST &&
3768              Op->getValueType(0) == MVT::f64) {
3769     // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
3770     // created by LowerConstantFP().
3771     SDValue BitcastOp = Op->getOperand(0);
3772     if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
3773         isNullConstant(BitcastOp->getOperand(0)))
3774       return true;
3775   }
3776   return false;
3777 }
3778 
3779 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
3780 /// the given operands.
3781 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
3782                                      SDValue &ARMcc, SelectionDAG &DAG,
3783                                      const SDLoc &dl) const {
3784   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
3785     unsigned C = RHSC->getZExtValue();
3786     if (!isLegalICmpImmediate(C)) {
3787       // Constant does not fit, try adjusting it by one?
3788       switch (CC) {
3789       default: break;
3790       case ISD::SETLT:
3791       case ISD::SETGE:
3792         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
3793           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
3794           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
3795         }
3796         break;
3797       case ISD::SETULT:
3798       case ISD::SETUGE:
3799         if (C != 0 && isLegalICmpImmediate(C-1)) {
3800           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
3801           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
3802         }
3803         break;
3804       case ISD::SETLE:
3805       case ISD::SETGT:
3806         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
3807           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
3808           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
3809         }
3810         break;
3811       case ISD::SETULE:
3812       case ISD::SETUGT:
3813         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
3814           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
3815           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
3816         }
3817         break;
3818       }
3819     }
3820   }
3821 
3822   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
3823   ARMISD::NodeType CompareType;
3824   switch (CondCode) {
3825   default:
3826     CompareType = ARMISD::CMP;
3827     break;
3828   case ARMCC::EQ:
3829   case ARMCC::NE:
3830     // Uses only Z Flag
3831     CompareType = ARMISD::CMPZ;
3832     break;
3833   }
3834   ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
3835   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
3836 }
3837 
3838 /// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
3839 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
3840                                      SelectionDAG &DAG, const SDLoc &dl,
3841                                      bool InvalidOnQNaN) const {
3842   assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
3843   SDValue Cmp;
3844   SDValue C = DAG.getConstant(InvalidOnQNaN, dl, MVT::i32);
3845   if (!isFloatingPointZero(RHS))
3846     Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS, C);
3847   else
3848     Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS, C);
3849   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
3850 }
3851 
3852 /// duplicateCmp - Glue values can have only one use, so this function
3853 /// duplicates a comparison node.
3854 SDValue
3855 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
3856   unsigned Opc = Cmp.getOpcode();
3857   SDLoc DL(Cmp);
3858   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
3859     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
3860 
3861   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
3862   Cmp = Cmp.getOperand(0);
3863   Opc = Cmp.getOpcode();
3864   if (Opc == ARMISD::CMPFP)
3865     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),
3866                       Cmp.getOperand(1), Cmp.getOperand(2));
3867   else {
3868     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
3869     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),
3870                       Cmp.getOperand(1));
3871   }
3872   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
3873 }
3874 
3875 std::pair<SDValue, SDValue>
3876 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
3877                                  SDValue &ARMcc) const {
3878   assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");
3879 
3880   SDValue Value, OverflowCmp;
3881   SDValue LHS = Op.getOperand(0);
3882   SDValue RHS = Op.getOperand(1);
3883   SDLoc dl(Op);
3884 
3885   // FIXME: We are currently always generating CMPs because we don't support
3886   // generating CMN through the backend. This is not as good as the natural
3887   // CMP case because it causes a register dependency and cannot be folded
3888   // later.
3889 
3890   switch (Op.getOpcode()) {
3891   default:
3892     llvm_unreachable("Unknown overflow instruction!");
3893   case ISD::SADDO:
3894     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
3895     Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
3896     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
3897     break;
3898   case ISD::UADDO:
3899     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
3900     Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
3901     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
3902     break;
3903   case ISD::SSUBO:
3904     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
3905     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
3906     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
3907     break;
3908   case ISD::USUBO:
3909     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
3910     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
3911     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
3912     break;
3913   } // switch (...)
3914 
3915   return std::make_pair(Value, OverflowCmp);
3916 }
3917 
3918 SDValue
3919 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
3920   // Let legalize expand this if it isn't a legal type yet.
3921   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
3922     return SDValue();
3923 
3924   SDValue Value, OverflowCmp;
3925   SDValue ARMcc;
3926   std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
3927   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3928   SDLoc dl(Op);
3929   // We use 0 and 1 as false and true values.
3930   SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3931   SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3932   EVT VT = Op.getValueType();
3933 
3934   SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
3935                                  ARMcc, CCR, OverflowCmp);
3936 
3937   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
3938   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
3939 }
3940 
3941 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3942   SDValue Cond = Op.getOperand(0);
3943   SDValue SelectTrue = Op.getOperand(1);
3944   SDValue SelectFalse = Op.getOperand(2);
3945   SDLoc dl(Op);
3946   unsigned Opc = Cond.getOpcode();
3947 
3948   if (Cond.getResNo() == 1 &&
3949       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
3950        Opc == ISD::USUBO)) {
3951     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
3952       return SDValue();
3953 
3954     SDValue Value, OverflowCmp;
3955     SDValue ARMcc;
3956     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
3957     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3958     EVT VT = Op.getValueType();
3959 
3960     return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
3961                    OverflowCmp, DAG);
3962   }
3963 
3964   // Convert:
3965   //
3966   //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
3967   //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
3968   //
3969   if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
3970     const ConstantSDNode *CMOVTrue =
3971       dyn_cast<ConstantSDNode>(Cond.getOperand(0));
3972     const ConstantSDNode *CMOVFalse =
3973       dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3974 
3975     if (CMOVTrue && CMOVFalse) {
3976       unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
3977       unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
3978 
3979       SDValue True;
3980       SDValue False;
3981       if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
3982         True = SelectTrue;
3983         False = SelectFalse;
3984       } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
3985         True = SelectFalse;
3986         False = SelectTrue;
3987       }
3988 
3989       if (True.getNode() && False.getNode()) {
3990         EVT VT = Op.getValueType();
3991         SDValue ARMcc = Cond.getOperand(2);
3992         SDValue CCR = Cond.getOperand(3);
3993         SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
3994         assert(True.getValueType() == VT);
3995         return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
3996       }
3997     }
3998   }
3999 
4000   // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
4001   // undefined bits before doing a full-word comparison with zero.
4002   Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
4003                      DAG.getConstant(1, dl, Cond.getValueType()));
4004 
4005   return DAG.getSelectCC(dl, Cond,
4006                          DAG.getConstant(0, dl, Cond.getValueType()),
4007                          SelectTrue, SelectFalse, ISD::SETNE);
4008 }
4009 
4010 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
4011                                  bool &swpCmpOps, bool &swpVselOps) {
4012   // Start by selecting the GE condition code for opcodes that return true for
4013   // 'equality'
4014   if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
4015       CC == ISD::SETULE)
4016     CondCode = ARMCC::GE;
4017 
4018   // and GT for opcodes that return false for 'equality'.
4019   else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
4020            CC == ISD::SETULT)
4021     CondCode = ARMCC::GT;
4022 
4023   // Since we are constrained to GE/GT, if the opcode contains 'less', we need
4024   // to swap the compare operands.
4025   if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
4026       CC == ISD::SETULT)
4027     swpCmpOps = true;
4028 
4029   // Both GT and GE are ordered comparisons, and return false for 'unordered'.
4030   // If we have an unordered opcode, we need to swap the operands to the VSEL
4031   // instruction (effectively negating the condition).
4032   //
4033   // This also has the effect of swapping which one of 'less' or 'greater'
4034   // returns true, so we also swap the compare operands. It also switches
4035   // whether we return true for 'equality', so we compensate by picking the
4036   // opposite condition code to our original choice.
4037   if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
4038       CC == ISD::SETUGT) {
4039     swpCmpOps = !swpCmpOps;
4040     swpVselOps = !swpVselOps;
4041     CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
4042   }
4043 
4044   // 'ordered' is 'anything but unordered', so use the VS condition code and
4045   // swap the VSEL operands.
4046   if (CC == ISD::SETO) {
4047     CondCode = ARMCC::VS;
4048     swpVselOps = true;
4049   }
4050 
4051   // 'unordered or not equal' is 'anything but equal', so use the EQ condition
4052   // code and swap the VSEL operands.
4053   if (CC == ISD::SETUNE) {
4054     CondCode = ARMCC::EQ;
4055     swpVselOps = true;
4056   }
4057 }
4058 
4059 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
4060                                    SDValue TrueVal, SDValue ARMcc, SDValue CCR,
4061                                    SDValue Cmp, SelectionDAG &DAG) const {
4062   if (Subtarget->isFPOnlySP() && VT == MVT::f64) {
4063     FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4064                            DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
4065     TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4066                           DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
4067 
4068     SDValue TrueLow = TrueVal.getValue(0);
4069     SDValue TrueHigh = TrueVal.getValue(1);
4070     SDValue FalseLow = FalseVal.getValue(0);
4071     SDValue FalseHigh = FalseVal.getValue(1);
4072 
4073     SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
4074                               ARMcc, CCR, Cmp);
4075     SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
4076                                ARMcc, CCR, duplicateCmp(Cmp, DAG));
4077 
4078     return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
4079   } else {
4080     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
4081                        Cmp);
4082   }
4083 }
4084 
4085 static bool isGTorGE(ISD::CondCode CC) {
4086   return CC == ISD::SETGT || CC == ISD::SETGE;
4087 }
4088 
4089 static bool isLTorLE(ISD::CondCode CC) {
4090   return CC == ISD::SETLT || CC == ISD::SETLE;
4091 }
4092 
4093 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
4094 // All of these conditions (and their <= and >= counterparts) will do:
4095 //          x < k ? k : x
4096 //          x > k ? x : k
4097 //          k < x ? x : k
4098 //          k > x ? k : x
4099 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
4100                             const SDValue TrueVal, const SDValue FalseVal,
4101                             const ISD::CondCode CC, const SDValue K) {
4102   return (isGTorGE(CC) &&
4103           ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4104          (isLTorLE(CC) &&
4105           ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4106 }
4107 
4108 // Similar to isLowerSaturate(), but checks for upper-saturating conditions.
4109 static bool isUpperSaturate(const SDValue LHS, const SDValue RHS,
4110                             const SDValue TrueVal, const SDValue FalseVal,
4111                             const ISD::CondCode CC, const SDValue K) {
4112   return (isGTorGE(CC) &&
4113           ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
4114          (isLTorLE(CC) &&
4115           ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
4116 }
4117 
4118 // Check if two chained conditionals could be converted into SSAT.
4119 //
4120 // SSAT can replace a set of two conditional selectors that bound a number to an
4121 // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples:
4122 //
4123 //     x < -k ? -k : (x > k ? k : x)
4124 //     x < -k ? -k : (x < k ? x : k)
4125 //     x > -k ? (x > k ? k : x) : -k
4126 //     x < k ? (x < -k ? -k : x) : k
4127 //     etc.
4128 //
4129 // It returns true if the conversion can be done, false otherwise.
4130 // Additionally, the variable is returned in parameter V and the constant in K.
4131 static bool isSaturatingConditional(const SDValue &Op, SDValue &V,
4132                                     uint64_t &K) {
4133   SDValue LHS1 = Op.getOperand(0);
4134   SDValue RHS1 = Op.getOperand(1);
4135   SDValue TrueVal1 = Op.getOperand(2);
4136   SDValue FalseVal1 = Op.getOperand(3);
4137   ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4138 
4139   const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
4140   if (Op2.getOpcode() != ISD::SELECT_CC)
4141     return false;
4142 
4143   SDValue LHS2 = Op2.getOperand(0);
4144   SDValue RHS2 = Op2.getOperand(1);
4145   SDValue TrueVal2 = Op2.getOperand(2);
4146   SDValue FalseVal2 = Op2.getOperand(3);
4147   ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();
4148 
4149   // Find out which are the constants and which are the variables
4150   // in each conditional
4151   SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
4152                                                         ? &RHS1
4153                                                         : nullptr;
4154   SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
4155                                                         ? &RHS2
4156                                                         : nullptr;
4157   SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
4158   SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
4159   SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
4160   SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
4161 
4162   // We must detect cases where the original operations worked with 16- or
4163   // 8-bit values. In such case, V2Tmp != V2 because the comparison operations
4164   // must work with sign-extended values but the select operations return
4165   // the original non-extended value.
4166   SDValue V2TmpReg = V2Tmp;
4167   if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG)
4168     V2TmpReg = V2Tmp->getOperand(0);
4169 
4170   // Check that the registers and the constants have the correct values
4171   // in both conditionals
4172   if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
4173       V2TmpReg != V2)
4174     return false;
4175 
4176   // Figure out which conditional is saturating the lower/upper bound.
4177   const SDValue *LowerCheckOp =
4178       isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
4179           ? &Op
4180           : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
4181                 ? &Op2
4182                 : nullptr;
4183   const SDValue *UpperCheckOp =
4184       isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
4185           ? &Op
4186           : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
4187                 ? &Op2
4188                 : nullptr;
4189 
4190   if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
4191     return false;
4192 
4193   // Check that the constant in the lower-bound check is
4194   // the opposite of the constant in the upper-bound check
4195   // in 1's complement.
4196   int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
4197   int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
4198   int64_t PosVal = std::max(Val1, Val2);
4199 
4200   if (((Val1 > Val2 && UpperCheckOp == &Op) ||
4201        (Val1 < Val2 && UpperCheckOp == &Op2)) &&
4202       Val1 == ~Val2 && isPowerOf2_64(PosVal + 1)) {
4203 
4204     V = V2;
4205     K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive
4206     return true;
4207   }
4208 
4209   return false;
4210 }
4211 
4212 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
4213   EVT VT = Op.getValueType();
4214   SDLoc dl(Op);
4215 
4216   // Try to convert two saturating conditional selects into a single SSAT
4217   SDValue SatValue;
4218   uint64_t SatConstant;
4219   if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) &&
4220       isSaturatingConditional(Op, SatValue, SatConstant))
4221     return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue,
4222                        DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
4223 
4224   SDValue LHS = Op.getOperand(0);
4225   SDValue RHS = Op.getOperand(1);
4226   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4227   SDValue TrueVal = Op.getOperand(2);
4228   SDValue FalseVal = Op.getOperand(3);
4229 
4230   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
4231     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
4232                                                     dl);
4233 
4234     // If softenSetCCOperands only returned one value, we should compare it to
4235     // zero.
4236     if (!RHS.getNode()) {
4237       RHS = DAG.getConstant(0, dl, LHS.getValueType());
4238       CC = ISD::SETNE;
4239     }
4240   }
4241 
4242   if (LHS.getValueType() == MVT::i32) {
4243     // Try to generate VSEL on ARMv8.
4244     // The VSEL instruction can't use all the usual ARM condition
4245     // codes: it only has two bits to select the condition code, so it's
4246     // constrained to use only GE, GT, VS and EQ.
4247     //
4248     // To implement all the various ISD::SETXXX opcodes, we sometimes need to
4249     // swap the operands of the previous compare instruction (effectively
4250     // inverting the compare condition, swapping 'less' and 'greater') and
4251     // sometimes need to swap the operands to the VSEL (which inverts the
4252     // condition in the sense of firing whenever the previous condition didn't)
4253     if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
4254                                     TrueVal.getValueType() == MVT::f64)) {
4255       ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4256       if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
4257           CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
4258         CC = ISD::getSetCCInverse(CC, true);
4259         std::swap(TrueVal, FalseVal);
4260       }
4261     }
4262 
4263     SDValue ARMcc;
4264     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4265     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4266     return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4267   }
4268 
4269   ARMCC::CondCodes CondCode, CondCode2;
4270   bool InvalidOnQNaN;
4271   FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);
4272 
4273   // Try to generate VMAXNM/VMINNM on ARMv8.
4274   if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
4275                                   TrueVal.getValueType() == MVT::f64)) {
4276     bool swpCmpOps = false;
4277     bool swpVselOps = false;
4278     checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
4279 
4280     if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
4281         CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
4282       if (swpCmpOps)
4283         std::swap(LHS, RHS);
4284       if (swpVselOps)
4285         std::swap(TrueVal, FalseVal);
4286     }
4287   }
4288 
4289   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4290   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4291   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4292   SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4293   if (CondCode2 != ARMCC::AL) {
4294     SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
4295     // FIXME: Needs another CMP because flag can have but one use.
4296     SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4297     Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
4298   }
4299   return Result;
4300 }
4301 
4302 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
4303 /// to morph to an integer compare sequence.
4304 static bool canChangeToInt(SDValue Op, bool &SeenZero,
4305                            const ARMSubtarget *Subtarget) {
4306   SDNode *N = Op.getNode();
4307   if (!N->hasOneUse())
4308     // Otherwise it requires moving the value from fp to integer registers.
4309     return false;
4310   if (!N->getNumValues())
4311     return false;
4312   EVT VT = Op.getValueType();
4313   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
4314     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
4315     // vmrs are very slow, e.g. cortex-a8.
4316     return false;
4317 
4318   if (isFloatingPointZero(Op)) {
4319     SeenZero = true;
4320     return true;
4321   }
4322   return ISD::isNormalLoad(N);
4323 }
4324 
4325 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
4326   if (isFloatingPointZero(Op))
4327     return DAG.getConstant(0, SDLoc(Op), MVT::i32);
4328 
4329   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
4330     return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
4331                        Ld->getPointerInfo(), Ld->getAlignment(),
4332                        Ld->getMemOperand()->getFlags());
4333 
4334   llvm_unreachable("Unknown VFP cmp argument!");
4335 }
4336 
4337 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
4338                            SDValue &RetVal1, SDValue &RetVal2) {
4339   SDLoc dl(Op);
4340 
4341   if (isFloatingPointZero(Op)) {
4342     RetVal1 = DAG.getConstant(0, dl, MVT::i32);
4343     RetVal2 = DAG.getConstant(0, dl, MVT::i32);
4344     return;
4345   }
4346 
4347   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
4348     SDValue Ptr = Ld->getBasePtr();
4349     RetVal1 =
4350         DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
4351                     Ld->getAlignment(), Ld->getMemOperand()->getFlags());
4352 
4353     EVT PtrType = Ptr.getValueType();
4354     unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
4355     SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
4356                                  PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
4357     RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
4358                           Ld->getPointerInfo().getWithOffset(4), NewAlign,
4359                           Ld->getMemOperand()->getFlags());
4360     return;
4361   }
4362 
4363   llvm_unreachable("Unknown VFP cmp argument!");
4364 }
4365 
4366 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
4367 /// f32 and even f64 comparisons to integer ones.
4368 SDValue
4369 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
4370   SDValue Chain = Op.getOperand(0);
4371   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
4372   SDValue LHS = Op.getOperand(2);
4373   SDValue RHS = Op.getOperand(3);
4374   SDValue Dest = Op.getOperand(4);
4375   SDLoc dl(Op);
4376 
4377   bool LHSSeenZero = false;
4378   bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
4379   bool RHSSeenZero = false;
4380   bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
4381   if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
4382     // If unsafe fp math optimization is enabled and there are no other uses of
4383     // the CMP operands, and the condition code is EQ or NE, we can optimize it
4384     // to an integer comparison.
4385     if (CC == ISD::SETOEQ)
4386       CC = ISD::SETEQ;
4387     else if (CC == ISD::SETUNE)
4388       CC = ISD::SETNE;
4389 
4390     SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
4391     SDValue ARMcc;
4392     if (LHS.getValueType() == MVT::f32) {
4393       LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
4394                         bitcastf32Toi32(LHS, DAG), Mask);
4395       RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
4396                         bitcastf32Toi32(RHS, DAG), Mask);
4397       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4398       SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4399       return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
4400                          Chain, Dest, ARMcc, CCR, Cmp);
4401     }
4402 
4403     SDValue LHS1, LHS2;
4404     SDValue RHS1, RHS2;
4405     expandf64Toi32(LHS, DAG, LHS1, LHS2);
4406     expandf64Toi32(RHS, DAG, RHS1, RHS2);
4407     LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
4408     RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
4409     ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4410     ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4411     SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
4412     SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
4413     return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
4414   }
4415 
4416   return SDValue();
4417 }
4418 
4419 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
4420   SDValue Chain = Op.getOperand(0);
4421   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
4422   SDValue LHS = Op.getOperand(2);
4423   SDValue RHS = Op.getOperand(3);
4424   SDValue Dest = Op.getOperand(4);
4425   SDLoc dl(Op);
4426 
4427   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
4428     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
4429                                                     dl);
4430 
4431     // If softenSetCCOperands only returned one value, we should compare it to
4432     // zero.
4433     if (!RHS.getNode()) {
4434       RHS = DAG.getConstant(0, dl, LHS.getValueType());
4435       CC = ISD::SETNE;
4436     }
4437   }
4438 
4439   if (LHS.getValueType() == MVT::i32) {
4440     SDValue ARMcc;
4441     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4442     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4443     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
4444                        Chain, Dest, ARMcc, CCR, Cmp);
4445   }
4446 
4447   assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
4448 
4449   if (getTargetMachine().Options.UnsafeFPMath &&
4450       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
4451        CC == ISD::SETNE || CC == ISD::SETUNE)) {
4452     if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
4453       return Result;
4454   }
4455 
4456   ARMCC::CondCodes CondCode, CondCode2;
4457   bool InvalidOnQNaN;
4458   FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);
4459 
4460   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4461   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4462   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4463   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
4464   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
4465   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
4466   if (CondCode2 != ARMCC::AL) {
4467     ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
4468     SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
4469     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
4470   }
4471   return Res;
4472 }
4473 
4474 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
4475   SDValue Chain = Op.getOperand(0);
4476   SDValue Table = Op.getOperand(1);
4477   SDValue Index = Op.getOperand(2);
4478   SDLoc dl(Op);
4479 
4480   EVT PTy = getPointerTy(DAG.getDataLayout());
4481   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
4482   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
4483   Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
4484   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
4485   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
4486   if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
4487     // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump table
4488     // which does another jump to the destination. This also makes it easier
4489     // to translate it to TBB / TBH later (Thumb2 only).
4490     // FIXME: This might not work if the function is extremely large.
4491     return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
4492                        Addr, Op.getOperand(2), JTI);
4493   }
4494   if (isPositionIndependent() || Subtarget->isROPI()) {
4495     Addr =
4496         DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
4497                     MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
4498     Chain = Addr.getValue(1);
4499     Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
4500     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
4501   } else {
4502     Addr =
4503         DAG.getLoad(PTy, dl, Chain, Addr,
4504                     MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
4505     Chain = Addr.getValue(1);
4506     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
4507   }
4508 }
4509 
4510 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
4511   EVT VT = Op.getValueType();
4512   SDLoc dl(Op);
4513 
4514   if (Op.getValueType().getVectorElementType() == MVT::i32) {
4515     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
4516       return Op;
4517     return DAG.UnrollVectorOp(Op.getNode());
4518   }
4519 
4520   assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
4521          "Invalid type for custom lowering!");
4522   if (VT != MVT::v4i16)
4523     return DAG.UnrollVectorOp(Op.getNode());
4524 
4525   Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
4526   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
4527 }
4528 
4529 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
4530   EVT VT = Op.getValueType();
4531   if (VT.isVector())
4532     return LowerVectorFP_TO_INT(Op, DAG);
4533   if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) {
4534     RTLIB::Libcall LC;
4535     if (Op.getOpcode() == ISD::FP_TO_SINT)
4536       LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
4537                               Op.getValueType());
4538     else
4539       LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
4540                               Op.getValueType());
4541     return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
4542                        /*isSigned*/ false, SDLoc(Op)).first;
4543   }
4544 
4545   return Op;
4546 }
4547 
4548 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
4549   EVT VT = Op.getValueType();
4550   SDLoc dl(Op);
4551 
4552   if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
4553     if (VT.getVectorElementType() == MVT::f32)
4554       return Op;
4555     return DAG.UnrollVectorOp(Op.getNode());
4556   }
4557 
4558   assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
4559          "Invalid type for custom lowering!");
4560   if (VT != MVT::v4f32)
4561     return DAG.UnrollVectorOp(Op.getNode());
4562 
4563   unsigned CastOpc;
4564   unsigned Opc;
4565   switch (Op.getOpcode()) {
4566   default: llvm_unreachable("Invalid opcode!");
4567   case ISD::SINT_TO_FP:
4568     CastOpc = ISD::SIGN_EXTEND;
4569     Opc = ISD::SINT_TO_FP;
4570     break;
4571   case ISD::UINT_TO_FP:
4572     CastOpc = ISD::ZERO_EXTEND;
4573     Opc = ISD::UINT_TO_FP;
4574     break;
4575   }
4576 
4577   Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
4578   return DAG.getNode(Opc, dl, VT, Op);
4579 }
4580 
4581 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
4582   EVT VT = Op.getValueType();
4583   if (VT.isVector())
4584     return LowerVectorINT_TO_FP(Op, DAG);
4585   if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) {
4586     RTLIB::Libcall LC;
4587     if (Op.getOpcode() == ISD::SINT_TO_FP)
4588       LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
4589                               Op.getValueType());
4590     else
4591       LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
4592                               Op.getValueType());
4593     return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
4594                        /*isSigned*/ false, SDLoc(Op)).first;
4595   }
4596 
4597   return Op;
4598 }
4599 
4600 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
4601   // Implement fcopysign with a fabs and a conditional fneg.
4602   SDValue Tmp0 = Op.getOperand(0);
4603   SDValue Tmp1 = Op.getOperand(1);
4604   SDLoc dl(Op);
4605   EVT VT = Op.getValueType();
4606   EVT SrcVT = Tmp1.getValueType();
4607   bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
4608     Tmp0.getOpcode() == ARMISD::VMOVDRR;
4609   bool UseNEON = !InGPR && Subtarget->hasNEON();
4610 
4611   if (UseNEON) {
4612     // Use VBSL to copy the sign bit.
4613     unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
4614     SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
4615                                DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
4616     EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
4617     if (VT == MVT::f64)
4618       Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
4619                          DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
4620                          DAG.getConstant(32, dl, MVT::i32));
4621     else /*if (VT == MVT::f32)*/
4622       Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
4623     if (SrcVT == MVT::f32) {
4624       Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
4625       if (VT == MVT::f64)
4626         Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
4627                            DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
4628                            DAG.getConstant(32, dl, MVT::i32));
4629     } else if (VT == MVT::f32)
4630       Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
4631                          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
4632                          DAG.getConstant(32, dl, MVT::i32));
4633     Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
4634     Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
4635 
4636     SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
4637                                             dl, MVT::i32);
4638     AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
4639     SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
4640                                   DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
4641 
4642     SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
4643                               DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
4644                               DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
4645     if (VT == MVT::f32) {
4646       Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
4647       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
4648                         DAG.getConstant(0, dl, MVT::i32));
4649     } else {
4650       Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
4651     }
4652 
4653     return Res;
4654   }
4655 
4656   // Bitcast operand 1 to i32.
4657   if (SrcVT == MVT::f64)
4658     Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
4659                        Tmp1).getValue(1);
4660   Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
4661 
4662   // Or in the signbit with integer operations.
4663   SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
4664   SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
4665   Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
4666   if (VT == MVT::f32) {
4667     Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
4668                        DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
4669     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4670                        DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
4671   }
4672 
4673   // f64: Or the high part with signbit and then combine two parts.
4674   Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
4675                      Tmp0);
4676   SDValue Lo = Tmp0.getValue(0);
4677   SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
4678   Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
4679   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
4680 }
4681 
4682 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
4683   MachineFunction &MF = DAG.getMachineFunction();
4684   MachineFrameInfo &MFI = MF.getFrameInfo();
4685   MFI.setReturnAddressIsTaken(true);
4686 
4687   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
4688     return SDValue();
4689 
4690   EVT VT = Op.getValueType();
4691   SDLoc dl(Op);
4692   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4693   if (Depth) {
4694     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
4695     SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
4696     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
4697                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
4698                        MachinePointerInfo());
4699   }
4700 
4701   // Return LR, which contains the return address. Mark it an implicit live-in.
4702   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
4703   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
4704 }
4705 
4706 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
4707   const ARMBaseRegisterInfo &ARI =
4708     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
4709   MachineFunction &MF = DAG.getMachineFunction();
4710   MachineFrameInfo &MFI = MF.getFrameInfo();
4711   MFI.setFrameAddressIsTaken(true);
4712 
4713   EVT VT = Op.getValueType();
4714   SDLoc dl(Op);  // FIXME probably not meaningful
4715   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4716   unsigned FrameReg = ARI.getFrameRegister(MF);
4717   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
4718   while (Depth--)
4719     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
4720                             MachinePointerInfo());
4721   return FrameAddr;
4722 }
4723 
4724 // FIXME? Maybe this could be a TableGen attribute on some registers and
4725 // this table could be generated automatically from RegInfo.
4726 unsigned ARMTargetLowering::getRegisterByName(const char* RegName, EVT VT,
4727                                               SelectionDAG &DAG) const {
4728   unsigned Reg = StringSwitch<unsigned>(RegName)
4729                        .Case("sp", ARM::SP)
4730                        .Default(0);
4731   if (Reg)
4732     return Reg;
4733   report_fatal_error(Twine("Invalid register name \""
4734                               + StringRef(RegName)  + "\"."));
4735 }
4736 
4737 // Result is 64 bit value so split into two 32 bit values and return as a
4738 // pair of values.
4739 static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
4740                                 SelectionDAG &DAG) {
4741   SDLoc DL(N);
4742 
4743   // This function is only supposed to be called for i64 type destination.
4744   assert(N->getValueType(0) == MVT::i64
4745           && "ExpandREAD_REGISTER called for non-i64 type result.");
4746 
4747   SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
4748                              DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
4749                              N->getOperand(0),
4750                              N->getOperand(1));
4751 
4752   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
4753                     Read.getValue(1)));
4754   Results.push_back(Read.getOperand(0));
4755 }
4756 
4757 /// \p BC is a bitcast that is about to be turned into a VMOVDRR.
4758 /// When \p DstVT, the destination type of \p BC, is on the vector
4759 /// register bank and the source of bitcast, \p Op, operates on the same bank,
4760 /// it might be possible to combine them, such that everything stays on the
4761 /// vector register bank.
4762 /// \p return The node that would replace \p BT, if the combine
4763 /// is possible.
4764 static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
4765                                                 SelectionDAG &DAG) {
4766   SDValue Op = BC->getOperand(0);
4767   EVT DstVT = BC->getValueType(0);
4768 
4769   // The only vector instruction that can produce a scalar (remember,
4770   // since the bitcast was about to be turned into VMOVDRR, the source
4771   // type is i64) from a vector is EXTRACT_VECTOR_ELT.
4772   // Moreover, we can do this combine only if there is one use.
4773   // Finally, if the destination type is not a vector, there is not
4774   // much point on forcing everything on the vector bank.
4775   if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4776       !Op.hasOneUse())
4777     return SDValue();
4778 
4779   // If the index is not constant, we will introduce an additional
4780   // multiply that will stick.
4781   // Give up in that case.
4782   ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
4783   if (!Index)
4784     return SDValue();
4785   unsigned DstNumElt = DstVT.getVectorNumElements();
4786 
4787   // Compute the new index.
4788   const APInt &APIntIndex = Index->getAPIntValue();
4789   APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
4790   NewIndex *= APIntIndex;
4791   // Check if the new constant index fits into i32.
4792   if (NewIndex.getBitWidth() > 32)
4793     return SDValue();
4794 
4795   // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
4796   // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
4797   SDLoc dl(Op);
4798   SDValue ExtractSrc = Op.getOperand(0);
4799   EVT VecVT = EVT::getVectorVT(
4800       *DAG.getContext(), DstVT.getScalarType(),
4801       ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
4802   SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
4803   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
4804                      DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
4805 }
4806 
4807 /// ExpandBITCAST - If the target supports VFP, this function is called to
4808 /// expand a bit convert where either the source or destination type is i64 to
4809 /// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
4810 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
4811 /// vectors), since the legalizer won't know what to do with that.
4812 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
4813   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4814   SDLoc dl(N);
4815   SDValue Op = N->getOperand(0);
4816 
4817   // This function is only supposed to be called for i64 types, either as the
4818   // source or destination of the bit convert.
4819   EVT SrcVT = Op.getValueType();
4820   EVT DstVT = N->getValueType(0);
4821   assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
4822          "ExpandBITCAST called for non-i64 type");
4823 
4824   // Turn i64->f64 into VMOVDRR.
4825   if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
4826     // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
4827     // if we can combine the bitcast with its source.
4828     if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
4829       return Val;
4830 
4831     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
4832                              DAG.getConstant(0, dl, MVT::i32));
4833     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
4834                              DAG.getConstant(1, dl, MVT::i32));
4835     return DAG.getNode(ISD::BITCAST, dl, DstVT,
4836                        DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
4837   }
4838 
4839   // Turn f64->i64 into VMOVRRD.
4840   if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
4841     SDValue Cvt;
4842     if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
4843         SrcVT.getVectorNumElements() > 1)
4844       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
4845                         DAG.getVTList(MVT::i32, MVT::i32),
4846                         DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
4847     else
4848       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
4849                         DAG.getVTList(MVT::i32, MVT::i32), Op);
4850     // Merge the pieces into a single i64 value.
4851     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
4852   }
4853 
4854   return SDValue();
4855 }
4856 
4857 /// getZeroVector - Returns a vector of specified type with all zero elements.
4858 /// Zero vectors are used to represent vector negation and in those cases
4859 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
4860 /// not support i64 elements, so sometimes the zero vectors will need to be
4861 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
4862 /// zero vector.
4863 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
4864   assert(VT.isVector() && "Expected a vector type");
4865   // The canonical modified immediate encoding of a zero vector is....0!
4866   SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
4867   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
4868   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
4869   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
4870 }
4871 
4872 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two
4873 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
4874 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
4875                                                 SelectionDAG &DAG) const {
4876   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
4877   EVT VT = Op.getValueType();
4878   unsigned VTBits = VT.getSizeInBits();
4879   SDLoc dl(Op);
4880   SDValue ShOpLo = Op.getOperand(0);
4881   SDValue ShOpHi = Op.getOperand(1);
4882   SDValue ShAmt  = Op.getOperand(2);
4883   SDValue ARMcc;
4884   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4885   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
4886 
4887   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
4888 
4889   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
4890                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
4891   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
4892   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
4893                                    DAG.getConstant(VTBits, dl, MVT::i32));
4894   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
4895   SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4896   SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
4897   SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4898                             ISD::SETGE, ARMcc, DAG, dl);
4899   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
4900                            ARMcc, CCR, CmpLo);
4901 
4902 
4903   SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
4904   SDValue HiBigShift = Opc == ISD::SRA
4905                            ? DAG.getNode(Opc, dl, VT, ShOpHi,
4906                                          DAG.getConstant(VTBits - 1, dl, VT))
4907                            : DAG.getConstant(0, dl, VT);
4908   SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4909                             ISD::SETGE, ARMcc, DAG, dl);
4910   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
4911                            ARMcc, CCR, CmpHi);
4912 
4913   SDValue Ops[2] = { Lo, Hi };
4914   return DAG.getMergeValues(Ops, dl);
4915 }
4916 
4917 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
4918 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
4919 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
4920                                                SelectionDAG &DAG) const {
4921   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
4922   EVT VT = Op.getValueType();
4923   unsigned VTBits = VT.getSizeInBits();
4924   SDLoc dl(Op);
4925   SDValue ShOpLo = Op.getOperand(0);
4926   SDValue ShOpHi = Op.getOperand(1);
4927   SDValue ShAmt  = Op.getOperand(2);
4928   SDValue ARMcc;
4929   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4930 
4931   assert(Op.getOpcode() == ISD::SHL_PARTS);
4932   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
4933                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
4934   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
4935   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
4936   SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4937 
4938   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
4939                                    DAG.getConstant(VTBits, dl, MVT::i32));
4940   SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
4941   SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4942                             ISD::SETGE, ARMcc, DAG, dl);
4943   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
4944                            ARMcc, CCR, CmpHi);
4945 
4946   SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4947                           ISD::SETGE, ARMcc, DAG, dl);
4948   SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
4949   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
4950                            DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);
4951 
4952   SDValue Ops[2] = { Lo, Hi };
4953   return DAG.getMergeValues(Ops, dl);
4954 }
4955 
4956 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
4957                                             SelectionDAG &DAG) const {
4958   // The rounding mode is in bits 23:22 of the FPSCR.
4959   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
4960   // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
4961   // so that the shift + and get folded into a bitfield extract.
4962   SDLoc dl(Op);
4963   SDValue Ops[] = { DAG.getEntryNode(),
4964                     DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32) };
4965 
4966   SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, MVT::i32, Ops);
4967   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
4968                                   DAG.getConstant(1U << 22, dl, MVT::i32));
4969   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
4970                               DAG.getConstant(22, dl, MVT::i32));
4971   return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
4972                      DAG.getConstant(3, dl, MVT::i32));
4973 }
4974 
4975 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
4976                          const ARMSubtarget *ST) {
4977   SDLoc dl(N);
4978   EVT VT = N->getValueType(0);
4979   if (VT.isVector()) {
4980     assert(ST->hasNEON());
4981 
4982     // Compute the least significant set bit: LSB = X & -X
4983     SDValue X = N->getOperand(0);
4984     SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
4985     SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);
4986 
4987     EVT ElemTy = VT.getVectorElementType();
4988 
4989     if (ElemTy == MVT::i8) {
4990       // Compute with: cttz(x) = ctpop(lsb - 1)
4991       SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
4992                                 DAG.getTargetConstant(1, dl, ElemTy));
4993       SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
4994       return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
4995     }
4996 
4997     if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
4998         (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
4999       // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
5000       unsigned NumBits = ElemTy.getSizeInBits();
5001       SDValue WidthMinus1 =
5002           DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5003                       DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
5004       SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
5005       return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
5006     }
5007 
5008     // Compute with: cttz(x) = ctpop(lsb - 1)
5009 
5010     // Since we can only compute the number of bits in a byte with vcnt.8, we
5011     // have to gather the result with pairwise addition (vpaddl) for i16, i32,
5012     // and i64.
5013 
5014     // Compute LSB - 1.
5015     SDValue Bits;
5016     if (ElemTy == MVT::i64) {
5017       // Load constant 0xffff'ffff'ffff'ffff to register.
5018       SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5019                                DAG.getTargetConstant(0x1eff, dl, MVT::i32));
5020       Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
5021     } else {
5022       SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5023                                 DAG.getTargetConstant(1, dl, ElemTy));
5024       Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
5025     }
5026 
5027     // Count #bits with vcnt.8.
5028     EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
5029     SDValue BitsVT8 = DAG.getNode(ISD::BITCAST, dl, VT8Bit, Bits);
5030     SDValue Cnt8 = DAG.getNode(ISD::CTPOP, dl, VT8Bit, BitsVT8);
5031 
5032     // Gather the #bits with vpaddl (pairwise add.)
5033     EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
5034     SDValue Cnt16 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT16Bit,
5035         DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5036         Cnt8);
5037     if (ElemTy == MVT::i16)
5038       return Cnt16;
5039 
5040     EVT VT32Bit = VT.is64BitVector() ? MVT::v2i32 : MVT::v4i32;
5041     SDValue Cnt32 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT32Bit,
5042         DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5043         Cnt16);
5044     if (ElemTy == MVT::i32)
5045       return Cnt32;
5046 
5047     assert(ElemTy == MVT::i64);
5048     SDValue Cnt64 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5049         DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5050         Cnt32);
5051     return Cnt64;
5052   }
5053 
5054   if (!ST->hasV6T2Ops())
5055     return SDValue();
5056 
5057   SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
5058   return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
5059 }
5060 
5061 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
5062 /// for each 16-bit element from operand, repeated.  The basic idea is to
5063 /// leverage vcnt to get the 8-bit counts, gather and add the results.
5064 ///
5065 /// Trace for v4i16:
5066 /// input    = [v0    v1    v2    v3   ] (vi 16-bit element)
5067 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
5068 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
5069 /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6]
5070 ///            [b0 b1 b2 b3 b4 b5 b6 b7]
5071 ///           +[b1 b0 b3 b2 b5 b4 b7 b6]
5072 /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0,
5073 /// vuzp:    = [k0 k1 k2 k3 k0 k1 k2 k3]  each ki is 8-bits)
5074 static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
5075   EVT VT = N->getValueType(0);
5076   SDLoc DL(N);
5077 
5078   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
5079   SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
5080   SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0);
5081   SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1);
5082   SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2);
5083   return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3);
5084 }
5085 
5086 /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the
5087 /// bit-count for each 16-bit element from the operand.  We need slightly
5088 /// different sequencing for v4i16 and v8i16 to stay within NEON's available
5089 /// 64/128-bit registers.
5090 ///
5091 /// Trace for v4i16:
5092 /// input           = [v0    v1    v2    v3    ] (vi 16-bit element)
5093 /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi)
5094 /// v8i16:Extended  = [k0    k1    k2    k3    k0    k1    k2    k3    ]
5095 /// v4i16:Extracted = [k0    k1    k2    k3    ]
5096 static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
5097   EVT VT = N->getValueType(0);
5098   SDLoc DL(N);
5099 
5100   SDValue BitCounts = getCTPOP16BitCounts(N, DAG);
5101   if (VT.is64BitVector()) {
5102     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts);
5103     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended,
5104                        DAG.getIntPtrConstant(0, DL));
5105   } else {
5106     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8,
5107                                     BitCounts, DAG.getIntPtrConstant(0, DL));
5108     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted);
5109   }
5110 }
5111 
5112 /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the
5113 /// bit-count for each 32-bit element from the operand.  The idea here is
5114 /// to split the vector into 16-bit elements, leverage the 16-bit count
5115 /// routine, and then combine the results.
5116 ///
5117 /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged):
5118 /// input    = [v0    v1    ] (vi: 32-bit elements)
5119 /// Bitcast  = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1])
5120 /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi)
5121 /// vrev: N0 = [k1 k0 k3 k2 ]
5122 ///            [k0 k1 k2 k3 ]
5123 ///       N1 =+[k1 k0 k3 k2 ]
5124 ///            [k0 k2 k1 k3 ]
5125 ///       N2 =+[k1 k3 k0 k2 ]
5126 ///            [k0    k2    k1    k3    ]
5127 /// Extended =+[k1    k3    k0    k2    ]
5128 ///            [k0    k2    ]
5129 /// Extracted=+[k1    k3    ]
5130 ///
5131 static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
5132   EVT VT = N->getValueType(0);
5133   SDLoc DL(N);
5134 
5135   EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
5136 
5137   SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0));
5138   SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG);
5139   SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16);
5140   SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0);
5141   SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1);
5142 
5143   if (VT.is64BitVector()) {
5144     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2);
5145     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended,
5146                        DAG.getIntPtrConstant(0, DL));
5147   } else {
5148     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2,
5149                                     DAG.getIntPtrConstant(0, DL));
5150     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted);
5151   }
5152 }
5153 
5154 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
5155                           const ARMSubtarget *ST) {
5156   EVT VT = N->getValueType(0);
5157 
5158   assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
5159   assert((VT == MVT::v2i32 || VT == MVT::v4i32 ||
5160           VT == MVT::v4i16 || VT == MVT::v8i16) &&
5161          "Unexpected type for custom ctpop lowering");
5162 
5163   if (VT.getVectorElementType() == MVT::i32)
5164     return lowerCTPOP32BitElements(N, DAG);
5165   else
5166     return lowerCTPOP16BitElements(N, DAG);
5167 }
5168 
5169 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
5170                           const ARMSubtarget *ST) {
5171   EVT VT = N->getValueType(0);
5172   SDLoc dl(N);
5173 
5174   if (!VT.isVector())
5175     return SDValue();
5176 
5177   // Lower vector shifts on NEON to use VSHL.
5178   assert(ST->hasNEON() && "unexpected vector shift");
5179 
5180   // Left shifts translate directly to the vshiftu intrinsic.
5181   if (N->getOpcode() == ISD::SHL)
5182     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5183                        DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl,
5184                                        MVT::i32),
5185                        N->getOperand(0), N->getOperand(1));
5186 
5187   assert((N->getOpcode() == ISD::SRA ||
5188           N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
5189 
5190   // NEON uses the same intrinsics for both left and right shifts.  For
5191   // right shifts, the shift amounts are negative, so negate the vector of
5192   // shift amounts.
5193   EVT ShiftVT = N->getOperand(1).getValueType();
5194   SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
5195                                      getZeroVector(ShiftVT, DAG, dl),
5196                                      N->getOperand(1));
5197   Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
5198                              Intrinsic::arm_neon_vshifts :
5199                              Intrinsic::arm_neon_vshiftu);
5200   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5201                      DAG.getConstant(vshiftInt, dl, MVT::i32),
5202                      N->getOperand(0), NegatedCount);
5203 }
5204 
5205 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
5206                                 const ARMSubtarget *ST) {
5207   EVT VT = N->getValueType(0);
5208   SDLoc dl(N);
5209 
5210   // We can get here for a node like i32 = ISD::SHL i32, i64
5211   if (VT != MVT::i64)
5212     return SDValue();
5213 
5214   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
5215          "Unknown shift to lower!");
5216 
5217   // We only lower SRA, SRL of 1 here, all others use generic lowering.
5218   if (!isOneConstant(N->getOperand(1)))
5219     return SDValue();
5220 
5221   // If we are in thumb mode, we don't have RRX.
5222   if (ST->isThumb1Only()) return SDValue();
5223 
5224   // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
5225   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
5226                            DAG.getConstant(0, dl, MVT::i32));
5227   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
5228                            DAG.getConstant(1, dl, MVT::i32));
5229 
5230   // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
5231   // captures the result into a carry flag.
5232   unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
5233   Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
5234 
5235   // The low part is an ARMISD::RRX operand, which shifts the carry in.
5236   Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
5237 
5238   // Merge the pieces into a single i64 value.
5239  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5240 }
5241 
5242 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
5243   SDValue TmpOp0, TmpOp1;
5244   bool Invert = false;
5245   bool Swap = false;
5246   unsigned Opc = 0;
5247 
5248   SDValue Op0 = Op.getOperand(0);
5249   SDValue Op1 = Op.getOperand(1);
5250   SDValue CC = Op.getOperand(2);
5251   EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
5252   EVT VT = Op.getValueType();
5253   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
5254   SDLoc dl(Op);
5255 
5256   if (Op0.getValueType().getVectorElementType() == MVT::i64 &&
5257       (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) {
5258     // Special-case integer 64-bit equality comparisons. They aren't legal,
5259     // but they can be lowered with a few vector instructions.
5260     unsigned CmpElements = CmpVT.getVectorNumElements() * 2;
5261     EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements);
5262     SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0);
5263     SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1);
5264     SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1,
5265                               DAG.getCondCode(ISD::SETEQ));
5266     SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp);
5267     SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed);
5268     Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged);
5269     if (SetCCOpcode == ISD::SETNE)
5270       Merged = DAG.getNOT(dl, Merged, CmpVT);
5271     Merged = DAG.getSExtOrTrunc(Merged, dl, VT);
5272     return Merged;
5273   }
5274 
5275   if (CmpVT.getVectorElementType() == MVT::i64)
5276     // 64-bit comparisons are not legal in general.
5277     return SDValue();
5278 
5279   if (Op1.getValueType().isFloatingPoint()) {
5280     switch (SetCCOpcode) {
5281     default: llvm_unreachable("Illegal FP comparison");
5282     case ISD::SETUNE:
5283     case ISD::SETNE:  Invert = true; LLVM_FALLTHROUGH;
5284     case ISD::SETOEQ:
5285     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
5286     case ISD::SETOLT:
5287     case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
5288     case ISD::SETOGT:
5289     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
5290     case ISD::SETOLE:
5291     case ISD::SETLE:  Swap = true; LLVM_FALLTHROUGH;
5292     case ISD::SETOGE:
5293     case ISD::SETGE: Opc = ARMISD::VCGE; break;
5294     case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
5295     case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
5296     case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
5297     case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
5298     case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
5299     case ISD::SETONE:
5300       // Expand this to (OLT | OGT).
5301       TmpOp0 = Op0;
5302       TmpOp1 = Op1;
5303       Opc = ISD::OR;
5304       Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
5305       Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
5306       break;
5307     case ISD::SETUO:
5308       Invert = true;
5309       LLVM_FALLTHROUGH;
5310     case ISD::SETO:
5311       // Expand this to (OLT | OGE).
5312       TmpOp0 = Op0;
5313       TmpOp1 = Op1;
5314       Opc = ISD::OR;
5315       Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
5316       Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1);
5317       break;
5318     }
5319   } else {
5320     // Integer comparisons.
5321     switch (SetCCOpcode) {
5322     default: llvm_unreachable("Illegal integer comparison");
5323     case ISD::SETNE:  Invert = true;
5324     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
5325     case ISD::SETLT:  Swap = true;
5326     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
5327     case ISD::SETLE:  Swap = true;
5328     case ISD::SETGE:  Opc = ARMISD::VCGE; break;
5329     case ISD::SETULT: Swap = true;
5330     case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
5331     case ISD::SETULE: Swap = true;
5332     case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
5333     }
5334 
5335     // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
5336     if (Opc == ARMISD::VCEQ) {
5337 
5338       SDValue AndOp;
5339       if (ISD::isBuildVectorAllZeros(Op1.getNode()))
5340         AndOp = Op0;
5341       else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
5342         AndOp = Op1;
5343 
5344       // Ignore bitconvert.
5345       if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
5346         AndOp = AndOp.getOperand(0);
5347 
5348       if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
5349         Opc = ARMISD::VTST;
5350         Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
5351         Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
5352         Invert = !Invert;
5353       }
5354     }
5355   }
5356 
5357   if (Swap)
5358     std::swap(Op0, Op1);
5359 
5360   // If one of the operands is a constant vector zero, attempt to fold the
5361   // comparison to a specialized compare-against-zero form.
5362   SDValue SingleOp;
5363   if (ISD::isBuildVectorAllZeros(Op1.getNode()))
5364     SingleOp = Op0;
5365   else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
5366     if (Opc == ARMISD::VCGE)
5367       Opc = ARMISD::VCLEZ;
5368     else if (Opc == ARMISD::VCGT)
5369       Opc = ARMISD::VCLTZ;
5370     SingleOp = Op1;
5371   }
5372 
5373   SDValue Result;
5374   if (SingleOp.getNode()) {
5375     switch (Opc) {
5376     case ARMISD::VCEQ:
5377       Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break;
5378     case ARMISD::VCGE:
5379       Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break;
5380     case ARMISD::VCLEZ:
5381       Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break;
5382     case ARMISD::VCGT:
5383       Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break;
5384     case ARMISD::VCLTZ:
5385       Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break;
5386     default:
5387       Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
5388     }
5389   } else {
5390      Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
5391   }
5392 
5393   Result = DAG.getSExtOrTrunc(Result, dl, VT);
5394 
5395   if (Invert)
5396     Result = DAG.getNOT(dl, Result, VT);
5397 
5398   return Result;
5399 }
5400 
5401 static SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) {
5402   SDValue LHS = Op.getOperand(0);
5403   SDValue RHS = Op.getOperand(1);
5404   SDValue Carry = Op.getOperand(2);
5405   SDValue Cond = Op.getOperand(3);
5406   SDLoc DL(Op);
5407 
5408   assert(LHS.getSimpleValueType().isInteger() && "SETCCE is integer only.");
5409 
5410   assert(Carry.getOpcode() != ISD::CARRY_FALSE);
5411   SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
5412   SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);
5413 
5414   SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
5415   SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
5416   SDValue ARMcc = DAG.getConstant(
5417       IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
5418   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5419   SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
5420                                    Cmp.getValue(1), SDValue());
5421   return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
5422                      CCR, Chain.getValue(1));
5423 }
5424 
5425 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
5426 /// valid vector constant for a NEON instruction with a "modified immediate"
5427 /// operand (e.g., VMOV).  If so, return the encoded value.
5428 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
5429                                  unsigned SplatBitSize, SelectionDAG &DAG,
5430                                  const SDLoc &dl, EVT &VT, bool is128Bits,
5431                                  NEONModImmType type) {
5432   unsigned OpCmode, Imm;
5433 
5434   // SplatBitSize is set to the smallest size that splats the vector, so a
5435   // zero vector will always have SplatBitSize == 8.  However, NEON modified
5436   // immediate instructions others than VMOV do not support the 8-bit encoding
5437   // of a zero vector, and the default encoding of zero is supposed to be the
5438   // 32-bit version.
5439   if (SplatBits == 0)
5440     SplatBitSize = 32;
5441 
5442   switch (SplatBitSize) {
5443   case 8:
5444     if (type != VMOVModImm)
5445       return SDValue();
5446     // Any 1-byte value is OK.  Op=0, Cmode=1110.
5447     assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
5448     OpCmode = 0xe;
5449     Imm = SplatBits;
5450     VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
5451     break;
5452 
5453   case 16:
5454     // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
5455     VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
5456     if ((SplatBits & ~0xff) == 0) {
5457       // Value = 0x00nn: Op=x, Cmode=100x.
5458       OpCmode = 0x8;
5459       Imm = SplatBits;
5460       break;
5461     }
5462     if ((SplatBits & ~0xff00) == 0) {
5463       // Value = 0xnn00: Op=x, Cmode=101x.
5464       OpCmode = 0xa;
5465       Imm = SplatBits >> 8;
5466       break;
5467     }
5468     return SDValue();
5469 
5470   case 32:
5471     // NEON's 32-bit VMOV supports splat values where:
5472     // * only one byte is nonzero, or
5473     // * the least significant byte is 0xff and the second byte is nonzero, or
5474     // * the least significant 2 bytes are 0xff and the third is nonzero.
5475     VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
5476     if ((SplatBits & ~0xff) == 0) {
5477       // Value = 0x000000nn: Op=x, Cmode=000x.
5478       OpCmode = 0;
5479       Imm = SplatBits;
5480       break;
5481     }
5482     if ((SplatBits & ~0xff00) == 0) {
5483       // Value = 0x0000nn00: Op=x, Cmode=001x.
5484       OpCmode = 0x2;
5485       Imm = SplatBits >> 8;
5486       break;
5487     }
5488     if ((SplatBits & ~0xff0000) == 0) {
5489       // Value = 0x00nn0000: Op=x, Cmode=010x.
5490       OpCmode = 0x4;
5491       Imm = SplatBits >> 16;
5492       break;
5493     }
5494     if ((SplatBits & ~0xff000000) == 0) {
5495       // Value = 0xnn000000: Op=x, Cmode=011x.
5496       OpCmode = 0x6;
5497       Imm = SplatBits >> 24;
5498       break;
5499     }
5500 
5501     // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
5502     if (type == OtherModImm) return SDValue();
5503 
5504     if ((SplatBits & ~0xffff) == 0 &&
5505         ((SplatBits | SplatUndef) & 0xff) == 0xff) {
5506       // Value = 0x0000nnff: Op=x, Cmode=1100.
5507       OpCmode = 0xc;
5508       Imm = SplatBits >> 8;
5509       break;
5510     }
5511 
5512     if ((SplatBits & ~0xffffff) == 0 &&
5513         ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
5514       // Value = 0x00nnffff: Op=x, Cmode=1101.
5515       OpCmode = 0xd;
5516       Imm = SplatBits >> 16;
5517       break;
5518     }
5519 
5520     // Note: there are a few 32-bit splat values (specifically: 00ffff00,
5521     // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
5522     // VMOV.I32.  A (very) minor optimization would be to replicate the value
5523     // and fall through here to test for a valid 64-bit splat.  But, then the
5524     // caller would also need to check and handle the change in size.
5525     return SDValue();
5526 
5527   case 64: {
5528     if (type != VMOVModImm)
5529       return SDValue();
5530     // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
5531     uint64_t BitMask = 0xff;
5532     uint64_t Val = 0;
5533     unsigned ImmMask = 1;
5534     Imm = 0;
5535     for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
5536       if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
5537         Val |= BitMask;
5538         Imm |= ImmMask;
5539       } else if ((SplatBits & BitMask) != 0) {
5540         return SDValue();
5541       }
5542       BitMask <<= 8;
5543       ImmMask <<= 1;
5544     }
5545 
5546     if (DAG.getDataLayout().isBigEndian())
5547       // swap higher and lower 32 bit word
5548       Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
5549 
5550     // Op=1, Cmode=1110.
5551     OpCmode = 0x1e;
5552     VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
5553     break;
5554   }
5555 
5556   default:
5557     llvm_unreachable("unexpected size for isNEONModifiedImm");
5558   }
5559 
5560   unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
5561   return DAG.getTargetConstant(EncodedVal, dl, MVT::i32);
5562 }
5563 
5564 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
5565                                            const ARMSubtarget *ST) const {
5566   bool IsDouble = Op.getValueType() == MVT::f64;
5567   ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
5568   const APFloat &FPVal = CFP->getValueAPF();
5569 
5570   // Prevent floating-point constants from using literal loads
5571   // when execute-only is enabled.
5572   if (ST->genExecuteOnly()) {
5573     APInt INTVal = FPVal.bitcastToAPInt();
5574     SDLoc DL(CFP);
5575     if (IsDouble) {
5576       SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32);
5577       SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32);
5578       if (!ST->isLittle())
5579         std::swap(Lo, Hi);
5580       return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi);
5581     } else {
5582       return DAG.getConstant(INTVal, DL, MVT::i32);
5583     }
5584   }
5585 
5586   if (!ST->hasVFP3())
5587     return SDValue();
5588 
5589   // Use the default (constant pool) lowering for double constants when we have
5590   // an SP-only FPU
5591   if (IsDouble && Subtarget->isFPOnlySP())
5592     return SDValue();
5593 
5594   // Try splatting with a VMOV.f32...
5595   int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
5596 
5597   if (ImmVal != -1) {
5598     if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
5599       // We have code in place to select a valid ConstantFP already, no need to
5600       // do any mangling.
5601       return Op;
5602     }
5603 
5604     // It's a float and we are trying to use NEON operations where
5605     // possible. Lower it to a splat followed by an extract.
5606     SDLoc DL(Op);
5607     SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32);
5608     SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
5609                                       NewVal);
5610     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
5611                        DAG.getConstant(0, DL, MVT::i32));
5612   }
5613 
5614   // The rest of our options are NEON only, make sure that's allowed before
5615   // proceeding..
5616   if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
5617     return SDValue();
5618 
5619   EVT VMovVT;
5620   uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
5621 
5622   // It wouldn't really be worth bothering for doubles except for one very
5623   // important value, which does happen to match: 0.0. So make sure we don't do
5624   // anything stupid.
5625   if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
5626     return SDValue();
5627 
5628   // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
5629   SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op),
5630                                      VMovVT, false, VMOVModImm);
5631   if (NewVal != SDValue()) {
5632     SDLoc DL(Op);
5633     SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
5634                                       NewVal);
5635     if (IsDouble)
5636       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
5637 
5638     // It's a float: cast and extract a vector element.
5639     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
5640                                        VecConstant);
5641     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
5642                        DAG.getConstant(0, DL, MVT::i32));
5643   }
5644 
5645   // Finally, try a VMVN.i32
5646   NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
5647                              false, VMVNModImm);
5648   if (NewVal != SDValue()) {
5649     SDLoc DL(Op);
5650     SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
5651 
5652     if (IsDouble)
5653       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
5654 
5655     // It's a float: cast and extract a vector element.
5656     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
5657                                        VecConstant);
5658     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
5659                        DAG.getConstant(0, DL, MVT::i32));
5660   }
5661 
5662   return SDValue();
5663 }
5664 
5665 // check if an VEXT instruction can handle the shuffle mask when the
5666 // vector sources of the shuffle are the same.
5667 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
5668   unsigned NumElts = VT.getVectorNumElements();
5669 
5670   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
5671   if (M[0] < 0)
5672     return false;
5673 
5674   Imm = M[0];
5675 
5676   // If this is a VEXT shuffle, the immediate value is the index of the first
5677   // element.  The other shuffle indices must be the successive elements after
5678   // the first one.
5679   unsigned ExpectedElt = Imm;
5680   for (unsigned i = 1; i < NumElts; ++i) {
5681     // Increment the expected index.  If it wraps around, just follow it
5682     // back to index zero and keep going.
5683     ++ExpectedElt;
5684     if (ExpectedElt == NumElts)
5685       ExpectedElt = 0;
5686 
5687     if (M[i] < 0) continue; // ignore UNDEF indices
5688     if (ExpectedElt != static_cast<unsigned>(M[i]))
5689       return false;
5690   }
5691 
5692   return true;
5693 }
5694 
5695 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
5696                        bool &ReverseVEXT, unsigned &Imm) {
5697   unsigned NumElts = VT.getVectorNumElements();
5698   ReverseVEXT = false;
5699 
5700   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
5701   if (M[0] < 0)
5702     return false;
5703 
5704   Imm = M[0];
5705 
5706   // If this is a VEXT shuffle, the immediate value is the index of the first
5707   // element.  The other shuffle indices must be the successive elements after
5708   // the first one.
5709   unsigned ExpectedElt = Imm;
5710   for (unsigned i = 1; i < NumElts; ++i) {
5711     // Increment the expected index.  If it wraps around, it may still be
5712     // a VEXT but the source vectors must be swapped.
5713     ExpectedElt += 1;
5714     if (ExpectedElt == NumElts * 2) {
5715       ExpectedElt = 0;
5716       ReverseVEXT = true;
5717     }
5718 
5719     if (M[i] < 0) continue; // ignore UNDEF indices
5720     if (ExpectedElt != static_cast<unsigned>(M[i]))
5721       return false;
5722   }
5723 
5724   // Adjust the index value if the source operands will be swapped.
5725   if (ReverseVEXT)
5726     Imm -= NumElts;
5727 
5728   return true;
5729 }
5730 
5731 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
5732 /// instruction with the specified blocksize.  (The order of the elements
5733 /// within each block of the vector is reversed.)
5734 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
5735   assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
5736          "Only possible block sizes for VREV are: 16, 32, 64");
5737 
5738   unsigned EltSz = VT.getScalarSizeInBits();
5739   if (EltSz == 64)
5740     return false;
5741 
5742   unsigned NumElts = VT.getVectorNumElements();
5743   unsigned BlockElts = M[0] + 1;
5744   // If the first shuffle index is UNDEF, be optimistic.
5745   if (M[0] < 0)
5746     BlockElts = BlockSize / EltSz;
5747 
5748   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
5749     return false;
5750 
5751   for (unsigned i = 0; i < NumElts; ++i) {
5752     if (M[i] < 0) continue; // ignore UNDEF indices
5753     if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
5754       return false;
5755   }
5756 
5757   return true;
5758 }
5759 
5760 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
5761   // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
5762   // range, then 0 is placed into the resulting vector. So pretty much any mask
5763   // of 8 elements can work here.
5764   return VT == MVT::v8i8 && M.size() == 8;
5765 }
5766 
5767 // Checks whether the shuffle mask represents a vector transpose (VTRN) by
5768 // checking that pairs of elements in the shuffle mask represent the same index
5769 // in each vector, incrementing the expected index by 2 at each step.
5770 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
5771 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
5772 //  v2={e,f,g,h}
5773 // WhichResult gives the offset for each element in the mask based on which
5774 // of the two results it belongs to.
5775 //
5776 // The transpose can be represented either as:
5777 // result1 = shufflevector v1, v2, result1_shuffle_mask
5778 // result2 = shufflevector v1, v2, result2_shuffle_mask
5779 // where v1/v2 and the shuffle masks have the same number of elements
5780 // (here WhichResult (see below) indicates which result is being checked)
5781 //
5782 // or as:
5783 // results = shufflevector v1, v2, shuffle_mask
5784 // where both results are returned in one vector and the shuffle mask has twice
5785 // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we
5786 // want to check the low half and high half of the shuffle mask as if it were
5787 // the other case
5788 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5789   unsigned EltSz = VT.getScalarSizeInBits();
5790   if (EltSz == 64)
5791     return false;
5792 
5793   unsigned NumElts = VT.getVectorNumElements();
5794   if (M.size() != NumElts && M.size() != NumElts*2)
5795     return false;
5796 
5797   // If the mask is twice as long as the input vector then we need to check the
5798   // upper and lower parts of the mask with a matching value for WhichResult
5799   // FIXME: A mask with only even values will be rejected in case the first
5800   // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
5801   // M[0] is used to determine WhichResult
5802   for (unsigned i = 0; i < M.size(); i += NumElts) {
5803     if (M.size() == NumElts * 2)
5804       WhichResult = i / NumElts;
5805     else
5806       WhichResult = M[i] == 0 ? 0 : 1;
5807     for (unsigned j = 0; j < NumElts; j += 2) {
5808       if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
5809           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult))
5810         return false;
5811     }
5812   }
5813 
5814   if (M.size() == NumElts*2)
5815     WhichResult = 0;
5816 
5817   return true;
5818 }
5819 
5820 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
5821 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
5822 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
5823 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
5824   unsigned EltSz = VT.getScalarSizeInBits();
5825   if (EltSz == 64)
5826     return false;
5827 
5828   unsigned NumElts = VT.getVectorNumElements();
5829   if (M.size() != NumElts && M.size() != NumElts*2)
5830     return false;
5831 
5832   for (unsigned i = 0; i < M.size(); i += NumElts) {
5833     if (M.size() == NumElts * 2)
5834       WhichResult = i / NumElts;
5835     else
5836       WhichResult = M[i] == 0 ? 0 : 1;
5837     for (unsigned j = 0; j < NumElts; j += 2) {
5838       if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
5839           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult))
5840         return false;
5841     }
5842   }
5843 
5844   if (M.size() == NumElts*2)
5845     WhichResult = 0;
5846 
5847   return true;
5848 }
5849 
5850 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
5851 // that the mask elements are either all even and in steps of size 2 or all odd
5852 // and in steps of size 2.
5853 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
5854 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
5855 //  v2={e,f,g,h}
5856 // Requires similar checks to that of isVTRNMask with
5857 // respect the how results are returned.
5858 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5859   unsigned EltSz = VT.getScalarSizeInBits();
5860   if (EltSz == 64)
5861     return false;
5862 
5863   unsigned NumElts = VT.getVectorNumElements();
5864   if (M.size() != NumElts && M.size() != NumElts*2)
5865     return false;
5866 
5867   for (unsigned i = 0; i < M.size(); i += NumElts) {
5868     WhichResult = M[i] == 0 ? 0 : 1;
5869     for (unsigned j = 0; j < NumElts; ++j) {
5870       if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
5871         return false;
5872     }
5873   }
5874 
5875   if (M.size() == NumElts*2)
5876     WhichResult = 0;
5877 
5878   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5879   if (VT.is64BitVector() && EltSz == 32)
5880     return false;
5881 
5882   return true;
5883 }
5884 
5885 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
5886 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
5887 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
5888 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
5889   unsigned EltSz = VT.getScalarSizeInBits();
5890   if (EltSz == 64)
5891     return false;
5892 
5893   unsigned NumElts = VT.getVectorNumElements();
5894   if (M.size() != NumElts && M.size() != NumElts*2)
5895     return false;
5896 
5897   unsigned Half = NumElts / 2;
5898   for (unsigned i = 0; i < M.size(); i += NumElts) {
5899     WhichResult = M[i] == 0 ? 0 : 1;
5900     for (unsigned j = 0; j < NumElts; j += Half) {
5901       unsigned Idx = WhichResult;
5902       for (unsigned k = 0; k < Half; ++k) {
5903         int MIdx = M[i + j + k];
5904         if (MIdx >= 0 && (unsigned) MIdx != Idx)
5905           return false;
5906         Idx += 2;
5907       }
5908     }
5909   }
5910 
5911   if (M.size() == NumElts*2)
5912     WhichResult = 0;
5913 
5914   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5915   if (VT.is64BitVector() && EltSz == 32)
5916     return false;
5917 
5918   return true;
5919 }
5920 
5921 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking
5922 // that pairs of elements of the shufflemask represent the same index in each
5923 // vector incrementing sequentially through the vectors.
5924 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
5925 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
5926 //  v2={e,f,g,h}
5927 // Requires similar checks to that of isVTRNMask with respect the how results
5928 // are returned.
5929 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5930   unsigned EltSz = VT.getScalarSizeInBits();
5931   if (EltSz == 64)
5932     return false;
5933 
5934   unsigned NumElts = VT.getVectorNumElements();
5935   if (M.size() != NumElts && M.size() != NumElts*2)
5936     return false;
5937 
5938   for (unsigned i = 0; i < M.size(); i += NumElts) {
5939     WhichResult = M[i] == 0 ? 0 : 1;
5940     unsigned Idx = WhichResult * NumElts / 2;
5941     for (unsigned j = 0; j < NumElts; j += 2) {
5942       if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
5943           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
5944         return false;
5945       Idx += 1;
5946     }
5947   }
5948 
5949   if (M.size() == NumElts*2)
5950     WhichResult = 0;
5951 
5952   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5953   if (VT.is64BitVector() && EltSz == 32)
5954     return false;
5955 
5956   return true;
5957 }
5958 
5959 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
5960 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
5961 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
5962 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
5963   unsigned EltSz = VT.getScalarSizeInBits();
5964   if (EltSz == 64)
5965     return false;
5966 
5967   unsigned NumElts = VT.getVectorNumElements();
5968   if (M.size() != NumElts && M.size() != NumElts*2)
5969     return false;
5970 
5971   for (unsigned i = 0; i < M.size(); i += NumElts) {
5972     WhichResult = M[i] == 0 ? 0 : 1;
5973     unsigned Idx = WhichResult * NumElts / 2;
5974     for (unsigned j = 0; j < NumElts; j += 2) {
5975       if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
5976           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
5977         return false;
5978       Idx += 1;
5979     }
5980   }
5981 
5982   if (M.size() == NumElts*2)
5983     WhichResult = 0;
5984 
5985   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5986   if (VT.is64BitVector() && EltSz == 32)
5987     return false;
5988 
5989   return true;
5990 }
5991 
5992 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
5993 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
5994 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
5995                                            unsigned &WhichResult,
5996                                            bool &isV_UNDEF) {
5997   isV_UNDEF = false;
5998   if (isVTRNMask(ShuffleMask, VT, WhichResult))
5999     return ARMISD::VTRN;
6000   if (isVUZPMask(ShuffleMask, VT, WhichResult))
6001     return ARMISD::VUZP;
6002   if (isVZIPMask(ShuffleMask, VT, WhichResult))
6003     return ARMISD::VZIP;
6004 
6005   isV_UNDEF = true;
6006   if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
6007     return ARMISD::VTRN;
6008   if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
6009     return ARMISD::VUZP;
6010   if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
6011     return ARMISD::VZIP;
6012 
6013   return 0;
6014 }
6015 
6016 /// \return true if this is a reverse operation on an vector.
6017 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
6018   unsigned NumElts = VT.getVectorNumElements();
6019   // Make sure the mask has the right size.
6020   if (NumElts != M.size())
6021       return false;
6022 
6023   // Look for <15, ..., 3, -1, 1, 0>.
6024   for (unsigned i = 0; i != NumElts; ++i)
6025     if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
6026       return false;
6027 
6028   return true;
6029 }
6030 
6031 // If N is an integer constant that can be moved into a register in one
6032 // instruction, return an SDValue of such a constant (will become a MOV
6033 // instruction).  Otherwise return null.
6034 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
6035                                      const ARMSubtarget *ST, const SDLoc &dl) {
6036   uint64_t Val;
6037   if (!isa<ConstantSDNode>(N))
6038     return SDValue();
6039   Val = cast<ConstantSDNode>(N)->getZExtValue();
6040 
6041   if (ST->isThumb1Only()) {
6042     if (Val <= 255 || ~Val <= 255)
6043       return DAG.getConstant(Val, dl, MVT::i32);
6044   } else {
6045     if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
6046       return DAG.getConstant(Val, dl, MVT::i32);
6047   }
6048   return SDValue();
6049 }
6050 
6051 // If this is a case we can't handle, return null and let the default
6052 // expansion code take care of it.
6053 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
6054                                              const ARMSubtarget *ST) const {
6055   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
6056   SDLoc dl(Op);
6057   EVT VT = Op.getValueType();
6058 
6059   APInt SplatBits, SplatUndef;
6060   unsigned SplatBitSize;
6061   bool HasAnyUndefs;
6062   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
6063     if (SplatUndef.isAllOnesValue())
6064       return DAG.getUNDEF(VT);
6065 
6066     if (SplatBitSize <= 64) {
6067       // Check if an immediate VMOV works.
6068       EVT VmovVT;
6069       SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
6070                                       SplatUndef.getZExtValue(), SplatBitSize,
6071                                       DAG, dl, VmovVT, VT.is128BitVector(),
6072                                       VMOVModImm);
6073       if (Val.getNode()) {
6074         SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
6075         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
6076       }
6077 
6078       // Try an immediate VMVN.
6079       uint64_t NegatedImm = (~SplatBits).getZExtValue();
6080       Val = isNEONModifiedImm(NegatedImm,
6081                                       SplatUndef.getZExtValue(), SplatBitSize,
6082                                       DAG, dl, VmovVT, VT.is128BitVector(),
6083                                       VMVNModImm);
6084       if (Val.getNode()) {
6085         SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
6086         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
6087       }
6088 
6089       // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
6090       if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
6091         int ImmVal = ARM_AM::getFP32Imm(SplatBits);
6092         if (ImmVal != -1) {
6093           SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
6094           return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
6095         }
6096       }
6097     }
6098   }
6099 
6100   // Scan through the operands to see if only one value is used.
6101   //
6102   // As an optimisation, even if more than one value is used it may be more
6103   // profitable to splat with one value then change some lanes.
6104   //
6105   // Heuristically we decide to do this if the vector has a "dominant" value,
6106   // defined as splatted to more than half of the lanes.
6107   unsigned NumElts = VT.getVectorNumElements();
6108   bool isOnlyLowElement = true;
6109   bool usesOnlyOneValue = true;
6110   bool hasDominantValue = false;
6111   bool isConstant = true;
6112 
6113   // Map of the number of times a particular SDValue appears in the
6114   // element list.
6115   DenseMap<SDValue, unsigned> ValueCounts;
6116   SDValue Value;
6117   for (unsigned i = 0; i < NumElts; ++i) {
6118     SDValue V = Op.getOperand(i);
6119     if (V.isUndef())
6120       continue;
6121     if (i > 0)
6122       isOnlyLowElement = false;
6123     if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
6124       isConstant = false;
6125 
6126     ValueCounts.insert(std::make_pair(V, 0));
6127     unsigned &Count = ValueCounts[V];
6128 
6129     // Is this value dominant? (takes up more than half of the lanes)
6130     if (++Count > (NumElts / 2)) {
6131       hasDominantValue = true;
6132       Value = V;
6133     }
6134   }
6135   if (ValueCounts.size() != 1)
6136     usesOnlyOneValue = false;
6137   if (!Value.getNode() && !ValueCounts.empty())
6138     Value = ValueCounts.begin()->first;
6139 
6140   if (ValueCounts.empty())
6141     return DAG.getUNDEF(VT);
6142 
6143   // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
6144   // Keep going if we are hitting this case.
6145   if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
6146     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
6147 
6148   unsigned EltSize = VT.getScalarSizeInBits();
6149 
6150   // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
6151   // i32 and try again.
6152   if (hasDominantValue && EltSize <= 32) {
6153     if (!isConstant) {
6154       SDValue N;
6155 
6156       // If we are VDUPing a value that comes directly from a vector, that will
6157       // cause an unnecessary move to and from a GPR, where instead we could
6158       // just use VDUPLANE. We can only do this if the lane being extracted
6159       // is at a constant index, as the VDUP from lane instructions only have
6160       // constant-index forms.
6161       ConstantSDNode *constIndex;
6162       if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6163           (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
6164         // We need to create a new undef vector to use for the VDUPLANE if the
6165         // size of the vector from which we get the value is different than the
6166         // size of the vector that we need to create. We will insert the element
6167         // such that the register coalescer will remove unnecessary copies.
6168         if (VT != Value->getOperand(0).getValueType()) {
6169           unsigned index = constIndex->getAPIntValue().getLimitedValue() %
6170                              VT.getVectorNumElements();
6171           N =  DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6172                  DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
6173                         Value, DAG.getConstant(index, dl, MVT::i32)),
6174                            DAG.getConstant(index, dl, MVT::i32));
6175         } else
6176           N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6177                         Value->getOperand(0), Value->getOperand(1));
6178       } else
6179         N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
6180 
6181       if (!usesOnlyOneValue) {
6182         // The dominant value was splatted as 'N', but we now have to insert
6183         // all differing elements.
6184         for (unsigned I = 0; I < NumElts; ++I) {
6185           if (Op.getOperand(I) == Value)
6186             continue;
6187           SmallVector<SDValue, 3> Ops;
6188           Ops.push_back(N);
6189           Ops.push_back(Op.getOperand(I));
6190           Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
6191           N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
6192         }
6193       }
6194       return N;
6195     }
6196     if (VT.getVectorElementType().isFloatingPoint()) {
6197       SmallVector<SDValue, 8> Ops;
6198       for (unsigned i = 0; i < NumElts; ++i)
6199         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
6200                                   Op.getOperand(i)));
6201       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
6202       SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
6203       Val = LowerBUILD_VECTOR(Val, DAG, ST);
6204       if (Val.getNode())
6205         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6206     }
6207     if (usesOnlyOneValue) {
6208       SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
6209       if (isConstant && Val.getNode())
6210         return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
6211     }
6212   }
6213 
6214   // If all elements are constants and the case above didn't get hit, fall back
6215   // to the default expansion, which will generate a load from the constant
6216   // pool.
6217   if (isConstant)
6218     return SDValue();
6219 
6220   // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
6221   if (NumElts >= 4) {
6222     SDValue shuffle = ReconstructShuffle(Op, DAG);
6223     if (shuffle != SDValue())
6224       return shuffle;
6225   }
6226 
6227   if (VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) {
6228     // If we haven't found an efficient lowering, try splitting a 128-bit vector
6229     // into two 64-bit vectors; we might discover a better way to lower it.
6230     SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts);
6231     EVT ExtVT = VT.getVectorElementType();
6232     EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2);
6233     SDValue Lower =
6234         DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2));
6235     if (Lower.getOpcode() == ISD::BUILD_VECTOR)
6236       Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
6237     SDValue Upper = DAG.getBuildVector(
6238         HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
6239     if (Upper.getOpcode() == ISD::BUILD_VECTOR)
6240       Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
6241     if (Lower && Upper)
6242       return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper);
6243   }
6244 
6245   // Vectors with 32- or 64-bit elements can be built by directly assigning
6246   // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
6247   // will be legalized.
6248   if (EltSize >= 32) {
6249     // Do the expansion with floating-point types, since that is what the VFP
6250     // registers are defined to use, and since i64 is not legal.
6251     EVT EltVT = EVT::getFloatingPointVT(EltSize);
6252     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
6253     SmallVector<SDValue, 8> Ops;
6254     for (unsigned i = 0; i < NumElts; ++i)
6255       Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
6256     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
6257     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6258   }
6259 
6260   // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
6261   // know the default expansion would otherwise fall back on something even
6262   // worse. For a vector with one or two non-undef values, that's
6263   // scalar_to_vector for the elements followed by a shuffle (provided the
6264   // shuffle is valid for the target) and materialization element by element
6265   // on the stack followed by a load for everything else.
6266   if (!isConstant && !usesOnlyOneValue) {
6267     SDValue Vec = DAG.getUNDEF(VT);
6268     for (unsigned i = 0 ; i < NumElts; ++i) {
6269       SDValue V = Op.getOperand(i);
6270       if (V.isUndef())
6271         continue;
6272       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
6273       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
6274     }
6275     return Vec;
6276   }
6277 
6278   return SDValue();
6279 }
6280 
6281 // Gather data to see if the operation can be modelled as a
6282 // shuffle in combination with VEXTs.
6283 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
6284                                               SelectionDAG &DAG) const {
6285   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
6286   SDLoc dl(Op);
6287   EVT VT = Op.getValueType();
6288   unsigned NumElts = VT.getVectorNumElements();
6289 
6290   struct ShuffleSourceInfo {
6291     SDValue Vec;
6292     unsigned MinElt = std::numeric_limits<unsigned>::max();
6293     unsigned MaxElt = 0;
6294 
6295     // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
6296     // be compatible with the shuffle we intend to construct. As a result
6297     // ShuffleVec will be some sliding window into the original Vec.
6298     SDValue ShuffleVec;
6299 
6300     // Code should guarantee that element i in Vec starts at element "WindowBase
6301     // + i * WindowScale in ShuffleVec".
6302     int WindowBase = 0;
6303     int WindowScale = 1;
6304 
6305     ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}
6306 
6307     bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
6308   };
6309 
6310   // First gather all vectors used as an immediate source for this BUILD_VECTOR
6311   // node.
6312   SmallVector<ShuffleSourceInfo, 2> Sources;
6313   for (unsigned i = 0; i < NumElts; ++i) {
6314     SDValue V = Op.getOperand(i);
6315     if (V.isUndef())
6316       continue;
6317     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
6318       // A shuffle can only come from building a vector from various
6319       // elements of other vectors.
6320       return SDValue();
6321     } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
6322       // Furthermore, shuffles require a constant mask, whereas extractelts
6323       // accept variable indices.
6324       return SDValue();
6325     }
6326 
6327     // Add this element source to the list if it's not already there.
6328     SDValue SourceVec = V.getOperand(0);
6329     auto Source = llvm::find(Sources, SourceVec);
6330     if (Source == Sources.end())
6331       Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
6332 
6333     // Update the minimum and maximum lane number seen.
6334     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
6335     Source->MinElt = std::min(Source->MinElt, EltNo);
6336     Source->MaxElt = std::max(Source->MaxElt, EltNo);
6337   }
6338 
6339   // Currently only do something sane when at most two source vectors
6340   // are involved.
6341   if (Sources.size() > 2)
6342     return SDValue();
6343 
6344   // Find out the smallest element size among result and two sources, and use
6345   // it as element size to build the shuffle_vector.
6346   EVT SmallestEltTy = VT.getVectorElementType();
6347   for (auto &Source : Sources) {
6348     EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
6349     if (SrcEltTy.bitsLT(SmallestEltTy))
6350       SmallestEltTy = SrcEltTy;
6351   }
6352   unsigned ResMultiplier =
6353       VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
6354   NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
6355   EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
6356 
6357   // If the source vector is too wide or too narrow, we may nevertheless be able
6358   // to construct a compatible shuffle either by concatenating it with UNDEF or
6359   // extracting a suitable range of elements.
6360   for (auto &Src : Sources) {
6361     EVT SrcVT = Src.ShuffleVec.getValueType();
6362 
6363     if (SrcVT.getSizeInBits() == VT.getSizeInBits())
6364       continue;
6365 
6366     // This stage of the search produces a source with the same element type as
6367     // the original, but with a total width matching the BUILD_VECTOR output.
6368     EVT EltVT = SrcVT.getVectorElementType();
6369     unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
6370     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
6371 
6372     if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
6373       if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
6374         return SDValue();
6375       // We can pad out the smaller vector for free, so if it's part of a
6376       // shuffle...
6377       Src.ShuffleVec =
6378           DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
6379                       DAG.getUNDEF(Src.ShuffleVec.getValueType()));
6380       continue;
6381     }
6382 
6383     if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
6384       return SDValue();
6385 
6386     if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
6387       // Span too large for a VEXT to cope
6388       return SDValue();
6389     }
6390 
6391     if (Src.MinElt >= NumSrcElts) {
6392       // The extraction can just take the second half
6393       Src.ShuffleVec =
6394           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6395                       DAG.getConstant(NumSrcElts, dl, MVT::i32));
6396       Src.WindowBase = -NumSrcElts;
6397     } else if (Src.MaxElt < NumSrcElts) {
6398       // The extraction can just take the first half
6399       Src.ShuffleVec =
6400           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6401                       DAG.getConstant(0, dl, MVT::i32));
6402     } else {
6403       // An actual VEXT is needed
6404       SDValue VEXTSrc1 =
6405           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6406                       DAG.getConstant(0, dl, MVT::i32));
6407       SDValue VEXTSrc2 =
6408           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6409                       DAG.getConstant(NumSrcElts, dl, MVT::i32));
6410 
6411       Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
6412                                    VEXTSrc2,
6413                                    DAG.getConstant(Src.MinElt, dl, MVT::i32));
6414       Src.WindowBase = -Src.MinElt;
6415     }
6416   }
6417 
6418   // Another possible incompatibility occurs from the vector element types. We
6419   // can fix this by bitcasting the source vectors to the same type we intend
6420   // for the shuffle.
6421   for (auto &Src : Sources) {
6422     EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
6423     if (SrcEltTy == SmallestEltTy)
6424       continue;
6425     assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
6426     Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
6427     Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
6428     Src.WindowBase *= Src.WindowScale;
6429   }
6430 
6431   // Final sanity check before we try to actually produce a shuffle.
6432   DEBUG(
6433     for (auto Src : Sources)
6434       assert(Src.ShuffleVec.getValueType() == ShuffleVT);
6435   );
6436 
6437   // The stars all align, our next step is to produce the mask for the shuffle.
6438   SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
6439   int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
6440   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
6441     SDValue Entry = Op.getOperand(i);
6442     if (Entry.isUndef())
6443       continue;
6444 
6445     auto Src = llvm::find(Sources, Entry.getOperand(0));
6446     int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
6447 
6448     // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
6449     // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
6450     // segment.
6451     EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
6452     int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
6453                                VT.getScalarSizeInBits());
6454     int LanesDefined = BitsDefined / BitsPerShuffleLane;
6455 
6456     // This source is expected to fill ResMultiplier lanes of the final shuffle,
6457     // starting at the appropriate offset.
6458     int *LaneMask = &Mask[i * ResMultiplier];
6459 
6460     int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
6461     ExtractBase += NumElts * (Src - Sources.begin());
6462     for (int j = 0; j < LanesDefined; ++j)
6463       LaneMask[j] = ExtractBase + j;
6464   }
6465 
6466   // Final check before we try to produce nonsense...
6467   if (!isShuffleMaskLegal(Mask, ShuffleVT))
6468     return SDValue();
6469 
6470   // We can't handle more than two sources. This should have already
6471   // been checked before this point.
6472   assert(Sources.size() <= 2 && "Too many sources!");
6473 
6474   SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
6475   for (unsigned i = 0; i < Sources.size(); ++i)
6476     ShuffleOps[i] = Sources[i].ShuffleVec;
6477 
6478   SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
6479                                          ShuffleOps[1], Mask);
6480   return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
6481 }
6482 
6483 /// isShuffleMaskLegal - Targets can use this to indicate that they only
6484 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
6485 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
6486 /// are assumed to be legal.
6487 bool
6488 ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
6489                                       EVT VT) const {
6490   if (VT.getVectorNumElements() == 4 &&
6491       (VT.is128BitVector() || VT.is64BitVector())) {
6492     unsigned PFIndexes[4];
6493     for (unsigned i = 0; i != 4; ++i) {
6494       if (M[i] < 0)
6495         PFIndexes[i] = 8;
6496       else
6497         PFIndexes[i] = M[i];
6498     }
6499 
6500     // Compute the index in the perfect shuffle table.
6501     unsigned PFTableIndex =
6502       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
6503     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
6504     unsigned Cost = (PFEntry >> 30);
6505 
6506     if (Cost <= 4)
6507       return true;
6508   }
6509 
6510   bool ReverseVEXT, isV_UNDEF;
6511   unsigned Imm, WhichResult;
6512 
6513   unsigned EltSize = VT.getScalarSizeInBits();
6514   return (EltSize >= 32 ||
6515           ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
6516           isVREVMask(M, VT, 64) ||
6517           isVREVMask(M, VT, 32) ||
6518           isVREVMask(M, VT, 16) ||
6519           isVEXTMask(M, VT, ReverseVEXT, Imm) ||
6520           isVTBLMask(M, VT) ||
6521           isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF) ||
6522           ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT)));
6523 }
6524 
6525 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
6526 /// the specified operations to build the shuffle.
6527 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
6528                                       SDValue RHS, SelectionDAG &DAG,
6529                                       const SDLoc &dl) {
6530   unsigned OpNum = (PFEntry >> 26) & 0x0F;
6531   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
6532   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
6533 
6534   enum {
6535     OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
6536     OP_VREV,
6537     OP_VDUP0,
6538     OP_VDUP1,
6539     OP_VDUP2,
6540     OP_VDUP3,
6541     OP_VEXT1,
6542     OP_VEXT2,
6543     OP_VEXT3,
6544     OP_VUZPL, // VUZP, left result
6545     OP_VUZPR, // VUZP, right result
6546     OP_VZIPL, // VZIP, left result
6547     OP_VZIPR, // VZIP, right result
6548     OP_VTRNL, // VTRN, left result
6549     OP_VTRNR  // VTRN, right result
6550   };
6551 
6552   if (OpNum == OP_COPY) {
6553     if (LHSID == (1*9+2)*9+3) return LHS;
6554     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
6555     return RHS;
6556   }
6557 
6558   SDValue OpLHS, OpRHS;
6559   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
6560   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
6561   EVT VT = OpLHS.getValueType();
6562 
6563   switch (OpNum) {
6564   default: llvm_unreachable("Unknown shuffle opcode!");
6565   case OP_VREV:
6566     // VREV divides the vector in half and swaps within the half.
6567     if (VT.getVectorElementType() == MVT::i32 ||
6568         VT.getVectorElementType() == MVT::f32)
6569       return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
6570     // vrev <4 x i16> -> VREV32
6571     if (VT.getVectorElementType() == MVT::i16)
6572       return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
6573     // vrev <4 x i8> -> VREV16
6574     assert(VT.getVectorElementType() == MVT::i8);
6575     return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
6576   case OP_VDUP0:
6577   case OP_VDUP1:
6578   case OP_VDUP2:
6579   case OP_VDUP3:
6580     return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6581                        OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
6582   case OP_VEXT1:
6583   case OP_VEXT2:
6584   case OP_VEXT3:
6585     return DAG.getNode(ARMISD::VEXT, dl, VT,
6586                        OpLHS, OpRHS,
6587                        DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
6588   case OP_VUZPL:
6589   case OP_VUZPR:
6590     return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
6591                        OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
6592   case OP_VZIPL:
6593   case OP_VZIPR:
6594     return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
6595                        OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
6596   case OP_VTRNL:
6597   case OP_VTRNR:
6598     return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
6599                        OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
6600   }
6601 }
6602 
6603 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
6604                                        ArrayRef<int> ShuffleMask,
6605                                        SelectionDAG &DAG) {
6606   // Check to see if we can use the VTBL instruction.
6607   SDValue V1 = Op.getOperand(0);
6608   SDValue V2 = Op.getOperand(1);
6609   SDLoc DL(Op);
6610 
6611   SmallVector<SDValue, 8> VTBLMask;
6612   for (ArrayRef<int>::iterator
6613          I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
6614     VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));
6615 
6616   if (V2.getNode()->isUndef())
6617     return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
6618                        DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
6619 
6620   return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
6621                      DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
6622 }
6623 
6624 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
6625                                                       SelectionDAG &DAG) {
6626   SDLoc DL(Op);
6627   SDValue OpLHS = Op.getOperand(0);
6628   EVT VT = OpLHS.getValueType();
6629 
6630   assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
6631          "Expect an v8i16/v16i8 type");
6632   OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
6633   // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now,
6634   // extract the first 8 bytes into the top double word and the last 8 bytes
6635   // into the bottom double word. The v8i16 case is similar.
6636   unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
6637   return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
6638                      DAG.getConstant(ExtractNum, DL, MVT::i32));
6639 }
6640 
6641 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
6642   SDValue V1 = Op.getOperand(0);
6643   SDValue V2 = Op.getOperand(1);
6644   SDLoc dl(Op);
6645   EVT VT = Op.getValueType();
6646   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
6647 
6648   // Convert shuffles that are directly supported on NEON to target-specific
6649   // DAG nodes, instead of keeping them as shuffles and matching them again
6650   // during code selection.  This is more efficient and avoids the possibility
6651   // of inconsistencies between legalization and selection.
6652   // FIXME: floating-point vectors should be canonicalized to integer vectors
6653   // of the same time so that they get CSEd properly.
6654   ArrayRef<int> ShuffleMask = SVN->getMask();
6655 
6656   unsigned EltSize = VT.getScalarSizeInBits();
6657   if (EltSize <= 32) {
6658     if (SVN->isSplat()) {
6659       int Lane = SVN->getSplatIndex();
6660       // If this is undef splat, generate it via "just" vdup, if possible.
6661       if (Lane == -1) Lane = 0;
6662 
6663       // Test if V1 is a SCALAR_TO_VECTOR.
6664       if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
6665         return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
6666       }
6667       // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
6668       // (and probably will turn into a SCALAR_TO_VECTOR once legalization
6669       // reaches it).
6670       if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
6671           !isa<ConstantSDNode>(V1.getOperand(0))) {
6672         bool IsScalarToVector = true;
6673         for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
6674           if (!V1.getOperand(i).isUndef()) {
6675             IsScalarToVector = false;
6676             break;
6677           }
6678         if (IsScalarToVector)
6679           return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
6680       }
6681       return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
6682                          DAG.getConstant(Lane, dl, MVT::i32));
6683     }
6684 
6685     bool ReverseVEXT;
6686     unsigned Imm;
6687     if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
6688       if (ReverseVEXT)
6689         std::swap(V1, V2);
6690       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
6691                          DAG.getConstant(Imm, dl, MVT::i32));
6692     }
6693 
6694     if (isVREVMask(ShuffleMask, VT, 64))
6695       return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
6696     if (isVREVMask(ShuffleMask, VT, 32))
6697       return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
6698     if (isVREVMask(ShuffleMask, VT, 16))
6699       return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
6700 
6701     if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
6702       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
6703                          DAG.getConstant(Imm, dl, MVT::i32));
6704     }
6705 
6706     // Check for Neon shuffles that modify both input vectors in place.
6707     // If both results are used, i.e., if there are two shuffles with the same
6708     // source operands and with masks corresponding to both results of one of
6709     // these operations, DAG memoization will ensure that a single node is
6710     // used for both shuffles.
6711     unsigned WhichResult;
6712     bool isV_UNDEF;
6713     if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
6714             ShuffleMask, VT, WhichResult, isV_UNDEF)) {
6715       if (isV_UNDEF)
6716         V2 = V1;
6717       return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
6718           .getValue(WhichResult);
6719     }
6720 
6721     // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
6722     // shuffles that produce a result larger than their operands with:
6723     //   shuffle(concat(v1, undef), concat(v2, undef))
6724     // ->
6725     //   shuffle(concat(v1, v2), undef)
6726     // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
6727     //
6728     // This is useful in the general case, but there are special cases where
6729     // native shuffles produce larger results: the two-result ops.
6730     //
6731     // Look through the concat when lowering them:
6732     //   shuffle(concat(v1, v2), undef)
6733     // ->
6734     //   concat(VZIP(v1, v2):0, :1)
6735     //
6736     if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
6737       SDValue SubV1 = V1->getOperand(0);
6738       SDValue SubV2 = V1->getOperand(1);
6739       EVT SubVT = SubV1.getValueType();
6740 
6741       // We expect these to have been canonicalized to -1.
6742       assert(llvm::all_of(ShuffleMask, [&](int i) {
6743         return i < (int)VT.getVectorNumElements();
6744       }) && "Unexpected shuffle index into UNDEF operand!");
6745 
6746       if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
6747               ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
6748         if (isV_UNDEF)
6749           SubV2 = SubV1;
6750         assert((WhichResult == 0) &&
6751                "In-place shuffle of concat can only have one result!");
6752         SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
6753                                   SubV1, SubV2);
6754         return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
6755                            Res.getValue(1));
6756       }
6757     }
6758   }
6759 
6760   // If the shuffle is not directly supported and it has 4 elements, use
6761   // the PerfectShuffle-generated table to synthesize it from other shuffles.
6762   unsigned NumElts = VT.getVectorNumElements();
6763   if (NumElts == 4) {
6764     unsigned PFIndexes[4];
6765     for (unsigned i = 0; i != 4; ++i) {
6766       if (ShuffleMask[i] < 0)
6767         PFIndexes[i] = 8;
6768       else
6769         PFIndexes[i] = ShuffleMask[i];
6770     }
6771 
6772     // Compute the index in the perfect shuffle table.
6773     unsigned PFTableIndex =
6774       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
6775     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
6776     unsigned Cost = (PFEntry >> 30);
6777 
6778     if (Cost <= 4)
6779       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
6780   }
6781 
6782   // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
6783   if (EltSize >= 32) {
6784     // Do the expansion with floating-point types, since that is what the VFP
6785     // registers are defined to use, and since i64 is not legal.
6786     EVT EltVT = EVT::getFloatingPointVT(EltSize);
6787     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
6788     V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
6789     V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
6790     SmallVector<SDValue, 8> Ops;
6791     for (unsigned i = 0; i < NumElts; ++i) {
6792       if (ShuffleMask[i] < 0)
6793         Ops.push_back(DAG.getUNDEF(EltVT));
6794       else
6795         Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
6796                                   ShuffleMask[i] < (int)NumElts ? V1 : V2,
6797                                   DAG.getConstant(ShuffleMask[i] & (NumElts-1),
6798                                                   dl, MVT::i32)));
6799     }
6800     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
6801     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6802   }
6803 
6804   if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
6805     return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);
6806 
6807   if (VT == MVT::v8i8)
6808     if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
6809       return NewOp;
6810 
6811   return SDValue();
6812 }
6813 
6814 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
6815   // INSERT_VECTOR_ELT is legal only for immediate indexes.
6816   SDValue Lane = Op.getOperand(2);
6817   if (!isa<ConstantSDNode>(Lane))
6818     return SDValue();
6819 
6820   return Op;
6821 }
6822 
6823 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
6824   // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
6825   SDValue Lane = Op.getOperand(1);
6826   if (!isa<ConstantSDNode>(Lane))
6827     return SDValue();
6828 
6829   SDValue Vec = Op.getOperand(0);
6830   if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
6831     SDLoc dl(Op);
6832     return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
6833   }
6834 
6835   return Op;
6836 }
6837 
6838 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
6839   // The only time a CONCAT_VECTORS operation can have legal types is when
6840   // two 64-bit vectors are concatenated to a 128-bit vector.
6841   assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
6842          "unexpected CONCAT_VECTORS");
6843   SDLoc dl(Op);
6844   SDValue Val = DAG.getUNDEF(MVT::v2f64);
6845   SDValue Op0 = Op.getOperand(0);
6846   SDValue Op1 = Op.getOperand(1);
6847   if (!Op0.isUndef())
6848     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
6849                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
6850                       DAG.getIntPtrConstant(0, dl));
6851   if (!Op1.isUndef())
6852     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
6853                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
6854                       DAG.getIntPtrConstant(1, dl));
6855   return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
6856 }
6857 
6858 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
6859 /// element has been zero/sign-extended, depending on the isSigned parameter,
6860 /// from an integer type half its size.
6861 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
6862                                    bool isSigned) {
6863   // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
6864   EVT VT = N->getValueType(0);
6865   if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
6866     SDNode *BVN = N->getOperand(0).getNode();
6867     if (BVN->getValueType(0) != MVT::v4i32 ||
6868         BVN->getOpcode() != ISD::BUILD_VECTOR)
6869       return false;
6870     unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
6871     unsigned HiElt = 1 - LoElt;
6872     ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
6873     ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
6874     ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
6875     ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
6876     if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
6877       return false;
6878     if (isSigned) {
6879       if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
6880           Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
6881         return true;
6882     } else {
6883       if (Hi0->isNullValue() && Hi1->isNullValue())
6884         return true;
6885     }
6886     return false;
6887   }
6888 
6889   if (N->getOpcode() != ISD::BUILD_VECTOR)
6890     return false;
6891 
6892   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
6893     SDNode *Elt = N->getOperand(i).getNode();
6894     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
6895       unsigned EltSize = VT.getScalarSizeInBits();
6896       unsigned HalfSize = EltSize / 2;
6897       if (isSigned) {
6898         if (!isIntN(HalfSize, C->getSExtValue()))
6899           return false;
6900       } else {
6901         if (!isUIntN(HalfSize, C->getZExtValue()))
6902           return false;
6903       }
6904       continue;
6905     }
6906     return false;
6907   }
6908 
6909   return true;
6910 }
6911 
6912 /// isSignExtended - Check if a node is a vector value that is sign-extended
6913 /// or a constant BUILD_VECTOR with sign-extended elements.
6914 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
6915   if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
6916     return true;
6917   if (isExtendedBUILD_VECTOR(N, DAG, true))
6918     return true;
6919   return false;
6920 }
6921 
6922 /// isZeroExtended - Check if a node is a vector value that is zero-extended
6923 /// or a constant BUILD_VECTOR with zero-extended elements.
6924 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
6925   if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
6926     return true;
6927   if (isExtendedBUILD_VECTOR(N, DAG, false))
6928     return true;
6929   return false;
6930 }
6931 
6932 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
6933   if (OrigVT.getSizeInBits() >= 64)
6934     return OrigVT;
6935 
6936   assert(OrigVT.isSimple() && "Expecting a simple value type");
6937 
6938   MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
6939   switch (OrigSimpleTy) {
6940   default: llvm_unreachable("Unexpected Vector Type");
6941   case MVT::v2i8:
6942   case MVT::v2i16:
6943      return MVT::v2i32;
6944   case MVT::v4i8:
6945     return  MVT::v4i16;
6946   }
6947 }
6948 
6949 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
6950 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
6951 /// We insert the required extension here to get the vector to fill a D register.
6952 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
6953                                             const EVT &OrigTy,
6954                                             const EVT &ExtTy,
6955                                             unsigned ExtOpcode) {
6956   // The vector originally had a size of OrigTy. It was then extended to ExtTy.
6957   // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
6958   // 64-bits we need to insert a new extension so that it will be 64-bits.
6959   assert(ExtTy.is128BitVector() && "Unexpected extension size");
6960   if (OrigTy.getSizeInBits() >= 64)
6961     return N;
6962 
6963   // Must extend size to at least 64 bits to be used as an operand for VMULL.
6964   EVT NewVT = getExtensionTo64Bits(OrigTy);
6965 
6966   return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
6967 }
6968 
6969 /// SkipLoadExtensionForVMULL - return a load of the original vector size that
6970 /// does not do any sign/zero extension. If the original vector is less
6971 /// than 64 bits, an appropriate extension will be added after the load to
6972 /// reach a total size of 64 bits. We have to add the extension separately
6973 /// because ARM does not have a sign/zero extending load for vectors.
6974 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
6975   EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());
6976 
6977   // The load already has the right type.
6978   if (ExtendedTy == LD->getMemoryVT())
6979     return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
6980                        LD->getBasePtr(), LD->getPointerInfo(),
6981                        LD->getAlignment(), LD->getMemOperand()->getFlags());
6982 
6983   // We need to create a zextload/sextload. We cannot just create a load
6984   // followed by a zext/zext node because LowerMUL is also run during normal
6985   // operation legalization where we can't create illegal types.
6986   return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
6987                         LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
6988                         LD->getMemoryVT(), LD->getAlignment(),
6989                         LD->getMemOperand()->getFlags());
6990 }
6991 
6992 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
6993 /// extending load, or BUILD_VECTOR with extended elements, return the
6994 /// unextended value. The unextended vector should be 64 bits so that it can
6995 /// be used as an operand to a VMULL instruction. If the original vector size
6996 /// before extension is less than 64 bits we add a an extension to resize
6997 /// the vector to 64 bits.
6998 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
6999   if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
7000     return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
7001                                         N->getOperand(0)->getValueType(0),
7002                                         N->getValueType(0),
7003                                         N->getOpcode());
7004 
7005   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
7006     assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
7007            "Expected extending load");
7008 
7009     SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG);
7010     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1));
7011     unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
7012     SDValue extLoad =
7013         DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad);
7014     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad);
7015 
7016     return newLoad;
7017   }
7018 
7019   // Otherwise, the value must be a BUILD_VECTOR.  For v2i64, it will
7020   // have been legalized as a BITCAST from v4i32.
7021   if (N->getOpcode() == ISD::BITCAST) {
7022     SDNode *BVN = N->getOperand(0).getNode();
7023     assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
7024            BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
7025     unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
7026     return DAG.getBuildVector(
7027         MVT::v2i32, SDLoc(N),
7028         {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
7029   }
7030   // Construct a new BUILD_VECTOR with elements truncated to half the size.
7031   assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
7032   EVT VT = N->getValueType(0);
7033   unsigned EltSize = VT.getScalarSizeInBits() / 2;
7034   unsigned NumElts = VT.getVectorNumElements();
7035   MVT TruncVT = MVT::getIntegerVT(EltSize);
7036   SmallVector<SDValue, 8> Ops;
7037   SDLoc dl(N);
7038   for (unsigned i = 0; i != NumElts; ++i) {
7039     ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
7040     const APInt &CInt = C->getAPIntValue();
7041     // Element types smaller than 32 bits are not legal, so use i32 elements.
7042     // The values are implicitly truncated so sext vs. zext doesn't matter.
7043     Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
7044   }
7045   return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
7046 }
7047 
7048 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
7049   unsigned Opcode = N->getOpcode();
7050   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
7051     SDNode *N0 = N->getOperand(0).getNode();
7052     SDNode *N1 = N->getOperand(1).getNode();
7053     return N0->hasOneUse() && N1->hasOneUse() &&
7054       isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
7055   }
7056   return false;
7057 }
7058 
7059 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
7060   unsigned Opcode = N->getOpcode();
7061   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
7062     SDNode *N0 = N->getOperand(0).getNode();
7063     SDNode *N1 = N->getOperand(1).getNode();
7064     return N0->hasOneUse() && N1->hasOneUse() &&
7065       isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
7066   }
7067   return false;
7068 }
7069 
7070 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
7071   // Multiplications are only custom-lowered for 128-bit vectors so that
7072   // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
7073   EVT VT = Op.getValueType();
7074   assert(VT.is128BitVector() && VT.isInteger() &&
7075          "unexpected type for custom-lowering ISD::MUL");
7076   SDNode *N0 = Op.getOperand(0).getNode();
7077   SDNode *N1 = Op.getOperand(1).getNode();
7078   unsigned NewOpc = 0;
7079   bool isMLA = false;
7080   bool isN0SExt = isSignExtended(N0, DAG);
7081   bool isN1SExt = isSignExtended(N1, DAG);
7082   if (isN0SExt && isN1SExt)
7083     NewOpc = ARMISD::VMULLs;
7084   else {
7085     bool isN0ZExt = isZeroExtended(N0, DAG);
7086     bool isN1ZExt = isZeroExtended(N1, DAG);
7087     if (isN0ZExt && isN1ZExt)
7088       NewOpc = ARMISD::VMULLu;
7089     else if (isN1SExt || isN1ZExt) {
7090       // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
7091       // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
7092       if (isN1SExt && isAddSubSExt(N0, DAG)) {
7093         NewOpc = ARMISD::VMULLs;
7094         isMLA = true;
7095       } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
7096         NewOpc = ARMISD::VMULLu;
7097         isMLA = true;
7098       } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
7099         std::swap(N0, N1);
7100         NewOpc = ARMISD::VMULLu;
7101         isMLA = true;
7102       }
7103     }
7104 
7105     if (!NewOpc) {
7106       if (VT == MVT::v2i64)
7107         // Fall through to expand this.  It is not legal.
7108         return SDValue();
7109       else
7110         // Other vector multiplications are legal.
7111         return Op;
7112     }
7113   }
7114 
7115   // Legalize to a VMULL instruction.
7116   SDLoc DL(Op);
7117   SDValue Op0;
7118   SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
7119   if (!isMLA) {
7120     Op0 = SkipExtensionForVMULL(N0, DAG);
7121     assert(Op0.getValueType().is64BitVector() &&
7122            Op1.getValueType().is64BitVector() &&
7123            "unexpected types for extended operands to VMULL");
7124     return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
7125   }
7126 
7127   // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during
7128   // isel lowering to take advantage of no-stall back to back vmul + vmla.
7129   //   vmull q0, d4, d6
7130   //   vmlal q0, d5, d6
7131   // is faster than
7132   //   vaddl q0, d4, d5
7133   //   vmovl q1, d6
7134   //   vmul  q0, q0, q1
7135   SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
7136   SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
7137   EVT Op1VT = Op1.getValueType();
7138   return DAG.getNode(N0->getOpcode(), DL, VT,
7139                      DAG.getNode(NewOpc, DL, VT,
7140                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
7141                      DAG.getNode(NewOpc, DL, VT,
7142                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
7143 }
7144 
7145 static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
7146                               SelectionDAG &DAG) {
7147   // TODO: Should this propagate fast-math-flags?
7148 
7149   // Convert to float
7150   // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
7151   // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
7152   X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
7153   Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
7154   X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
7155   Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
7156   // Get reciprocal estimate.
7157   // float4 recip = vrecpeq_f32(yf);
7158   Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7159                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7160                    Y);
7161   // Because char has a smaller range than uchar, we can actually get away
7162   // without any newton steps.  This requires that we use a weird bias
7163   // of 0xb000, however (again, this has been exhaustively tested).
7164   // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
7165   X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
7166   X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
7167   Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
7168   X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
7169   X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
7170   // Convert back to short.
7171   X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
7172   X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
7173   return X;
7174 }
7175 
7176 static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
7177                                SelectionDAG &DAG) {
7178   // TODO: Should this propagate fast-math-flags?
7179 
7180   SDValue N2;
7181   // Convert to float.
7182   // float4 yf = vcvt_f32_s32(vmovl_s16(y));
7183   // float4 xf = vcvt_f32_s32(vmovl_s16(x));
7184   N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
7185   N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
7186   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
7187   N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
7188 
7189   // Use reciprocal estimate and one refinement step.
7190   // float4 recip = vrecpeq_f32(yf);
7191   // recip *= vrecpsq_f32(yf, recip);
7192   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7193                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7194                    N1);
7195   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7196                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7197                    N1, N2);
7198   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7199   // Because short has a smaller range than ushort, we can actually get away
7200   // with only a single newton step.  This requires that we use a weird bias
7201   // of 89, however (again, this has been exhaustively tested).
7202   // float4 result = as_float4(as_int4(xf*recip) + 0x89);
7203   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
7204   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
7205   N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
7206   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
7207   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
7208   // Convert back to integer and return.
7209   // return vmovn_s32(vcvt_s32_f32(result));
7210   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
7211   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
7212   return N0;
7213 }
7214 
7215 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
7216   EVT VT = Op.getValueType();
7217   assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
7218          "unexpected type for custom-lowering ISD::SDIV");
7219 
7220   SDLoc dl(Op);
7221   SDValue N0 = Op.getOperand(0);
7222   SDValue N1 = Op.getOperand(1);
7223   SDValue N2, N3;
7224 
7225   if (VT == MVT::v8i8) {
7226     N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
7227     N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
7228 
7229     N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7230                      DAG.getIntPtrConstant(4, dl));
7231     N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7232                      DAG.getIntPtrConstant(4, dl));
7233     N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7234                      DAG.getIntPtrConstant(0, dl));
7235     N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7236                      DAG.getIntPtrConstant(0, dl));
7237 
7238     N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
7239     N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
7240 
7241     N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
7242     N0 = LowerCONCAT_VECTORS(N0, DAG);
7243 
7244     N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
7245     return N0;
7246   }
7247   return LowerSDIV_v4i16(N0, N1, dl, DAG);
7248 }
7249 
7250 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
7251   // TODO: Should this propagate fast-math-flags?
7252   EVT VT = Op.getValueType();
7253   assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
7254          "unexpected type for custom-lowering ISD::UDIV");
7255 
7256   SDLoc dl(Op);
7257   SDValue N0 = Op.getOperand(0);
7258   SDValue N1 = Op.getOperand(1);
7259   SDValue N2, N3;
7260 
7261   if (VT == MVT::v8i8) {
7262     N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
7263     N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
7264 
7265     N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7266                      DAG.getIntPtrConstant(4, dl));
7267     N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7268                      DAG.getIntPtrConstant(4, dl));
7269     N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7270                      DAG.getIntPtrConstant(0, dl));
7271     N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7272                      DAG.getIntPtrConstant(0, dl));
7273 
7274     N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
7275     N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
7276 
7277     N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
7278     N0 = LowerCONCAT_VECTORS(N0, DAG);
7279 
7280     N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
7281                      DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
7282                                      MVT::i32),
7283                      N0);
7284     return N0;
7285   }
7286 
7287   // v4i16 sdiv ... Convert to float.
7288   // float4 yf = vcvt_f32_s32(vmovl_u16(y));
7289   // float4 xf = vcvt_f32_s32(vmovl_u16(x));
7290   N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
7291   N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
7292   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
7293   SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
7294 
7295   // Use reciprocal estimate and two refinement steps.
7296   // float4 recip = vrecpeq_f32(yf);
7297   // recip *= vrecpsq_f32(yf, recip);
7298   // recip *= vrecpsq_f32(yf, recip);
7299   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7300                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7301                    BN1);
7302   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7303                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7304                    BN1, N2);
7305   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7306   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7307                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7308                    BN1, N2);
7309   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7310   // Simply multiplying by the reciprocal estimate can leave us a few ulps
7311   // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
7312   // and that it will never cause us to return an answer too large).
7313   // float4 result = as_float4(as_int4(xf*recip) + 2);
7314   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
7315   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
7316   N1 = DAG.getConstant(2, dl, MVT::v4i32);
7317   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
7318   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
7319   // Convert back to integer and return.
7320   // return vmovn_u32(vcvt_s32_f32(result));
7321   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
7322   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
7323   return N0;
7324 }
7325 
7326 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
7327   EVT VT = Op.getNode()->getValueType(0);
7328   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
7329 
7330   unsigned Opc;
7331   bool ExtraOp = false;
7332   switch (Op.getOpcode()) {
7333   default: llvm_unreachable("Invalid code");
7334   case ISD::ADDC: Opc = ARMISD::ADDC; break;
7335   case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
7336   case ISD::SUBC: Opc = ARMISD::SUBC; break;
7337   case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
7338   }
7339 
7340   if (!ExtraOp)
7341     return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
7342                        Op.getOperand(1));
7343   return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
7344                      Op.getOperand(1), Op.getOperand(2));
7345 }
7346 
7347 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
7348   assert(Subtarget->isTargetDarwin());
7349 
7350   // For iOS, we want to call an alternative entry point: __sincos_stret,
7351   // return values are passed via sret.
7352   SDLoc dl(Op);
7353   SDValue Arg = Op.getOperand(0);
7354   EVT ArgVT = Arg.getValueType();
7355   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
7356   auto PtrVT = getPointerTy(DAG.getDataLayout());
7357 
7358   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7359   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7360 
7361   // Pair of floats / doubles used to pass the result.
7362   Type *RetTy = StructType::get(ArgTy, ArgTy);
7363   auto &DL = DAG.getDataLayout();
7364 
7365   ArgListTy Args;
7366   bool ShouldUseSRet = Subtarget->isAPCS_ABI();
7367   SDValue SRet;
7368   if (ShouldUseSRet) {
7369     // Create stack object for sret.
7370     const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
7371     const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy);
7372     int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
7373     SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));
7374 
7375     ArgListEntry Entry;
7376     Entry.Node = SRet;
7377     Entry.Ty = RetTy->getPointerTo();
7378     Entry.IsSExt = false;
7379     Entry.IsZExt = false;
7380     Entry.IsSRet = true;
7381     Args.push_back(Entry);
7382     RetTy = Type::getVoidTy(*DAG.getContext());
7383   }
7384 
7385   ArgListEntry Entry;
7386   Entry.Node = Arg;
7387   Entry.Ty = ArgTy;
7388   Entry.IsSExt = false;
7389   Entry.IsZExt = false;
7390   Args.push_back(Entry);
7391 
7392   const char *LibcallName =
7393       (ArgVT == MVT::f64) ? "__sincos_stret" : "__sincosf_stret";
7394   RTLIB::Libcall LC =
7395       (ArgVT == MVT::f64) ? RTLIB::SINCOS_F64 : RTLIB::SINCOS_F32;
7396   CallingConv::ID CC = getLibcallCallingConv(LC);
7397   SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));
7398 
7399   TargetLowering::CallLoweringInfo CLI(DAG);
7400   CLI.setDebugLoc(dl)
7401       .setChain(DAG.getEntryNode())
7402       .setCallee(CC, RetTy, Callee, std::move(Args))
7403       .setDiscardResult(ShouldUseSRet);
7404   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
7405 
7406   if (!ShouldUseSRet)
7407     return CallResult.first;
7408 
7409   SDValue LoadSin =
7410       DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());
7411 
7412   // Address of cos field.
7413   SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
7414                             DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
7415   SDValue LoadCos =
7416       DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());
7417 
7418   SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
7419   return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
7420                      LoadSin.getValue(0), LoadCos.getValue(0));
7421 }
7422 
7423 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
7424                                                   bool Signed,
7425                                                   SDValue &Chain) const {
7426   EVT VT = Op.getValueType();
7427   assert((VT == MVT::i32 || VT == MVT::i64) &&
7428          "unexpected type for custom lowering DIV");
7429   SDLoc dl(Op);
7430 
7431   const auto &DL = DAG.getDataLayout();
7432   const auto &TLI = DAG.getTargetLoweringInfo();
7433 
7434   const char *Name = nullptr;
7435   if (Signed)
7436     Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
7437   else
7438     Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";
7439 
7440   SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));
7441 
7442   ARMTargetLowering::ArgListTy Args;
7443 
7444   for (auto AI : {1, 0}) {
7445     ArgListEntry Arg;
7446     Arg.Node = Op.getOperand(AI);
7447     Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
7448     Args.push_back(Arg);
7449   }
7450 
7451   CallLoweringInfo CLI(DAG);
7452   CLI.setDebugLoc(dl)
7453     .setChain(Chain)
7454     .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
7455                ES, std::move(Args));
7456 
7457   return LowerCallTo(CLI).first;
7458 }
7459 
7460 SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
7461                                             bool Signed) const {
7462   assert(Op.getValueType() == MVT::i32 &&
7463          "unexpected type for custom lowering DIV");
7464   SDLoc dl(Op);
7465 
7466   SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
7467                                DAG.getEntryNode(), Op.getOperand(1));
7468 
7469   return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7470 }
7471 
7472 static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) {
7473   SDLoc DL(N);
7474   SDValue Op = N->getOperand(1);
7475   if (N->getValueType(0) == MVT::i32)
7476     return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
7477   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
7478                            DAG.getConstant(0, DL, MVT::i32));
7479   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
7480                            DAG.getConstant(1, DL, MVT::i32));
7481   return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
7482                      DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
7483 }
7484 
7485 void ARMTargetLowering::ExpandDIV_Windows(
7486     SDValue Op, SelectionDAG &DAG, bool Signed,
7487     SmallVectorImpl<SDValue> &Results) const {
7488   const auto &DL = DAG.getDataLayout();
7489   const auto &TLI = DAG.getTargetLoweringInfo();
7490 
7491   assert(Op.getValueType() == MVT::i64 &&
7492          "unexpected type for custom lowering DIV");
7493   SDLoc dl(Op);
7494 
7495   SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());
7496 
7497   SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7498 
7499   SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
7500   SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
7501                               DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
7502   Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);
7503 
7504   Results.push_back(Lower);
7505   Results.push_back(Upper);
7506 }
7507 
7508 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
7509   if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
7510     // Acquire/Release load/store is not legal for targets without a dmb or
7511     // equivalent available.
7512     return SDValue();
7513 
7514   // Monotonic load/store is legal for all targets.
7515   return Op;
7516 }
7517 
7518 static void ReplaceREADCYCLECOUNTER(SDNode *N,
7519                                     SmallVectorImpl<SDValue> &Results,
7520                                     SelectionDAG &DAG,
7521                                     const ARMSubtarget *Subtarget) {
7522   SDLoc DL(N);
7523   // Under Power Management extensions, the cycle-count is:
7524   //    mrc p15, #0, <Rt>, c9, c13, #0
7525   SDValue Ops[] = { N->getOperand(0), // Chain
7526                     DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
7527                     DAG.getConstant(15, DL, MVT::i32),
7528                     DAG.getConstant(0, DL, MVT::i32),
7529                     DAG.getConstant(9, DL, MVT::i32),
7530                     DAG.getConstant(13, DL, MVT::i32),
7531                     DAG.getConstant(0, DL, MVT::i32)
7532   };
7533 
7534   SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
7535                                  DAG.getVTList(MVT::i32, MVT::Other), Ops);
7536   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
7537                                 DAG.getConstant(0, DL, MVT::i32)));
7538   Results.push_back(Cycles32.getValue(1));
7539 }
7540 
7541 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
7542   SDLoc dl(V.getNode());
7543   SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
7544   SDValue VHi = DAG.getAnyExtOrTrunc(
7545       DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
7546       dl, MVT::i32);
7547   SDValue RegClass =
7548       DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
7549   SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
7550   SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
7551   const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
7552   return SDValue(
7553       DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
7554 }
7555 
7556 static void ReplaceCMP_SWAP_64Results(SDNode *N,
7557                                        SmallVectorImpl<SDValue> & Results,
7558                                        SelectionDAG &DAG) {
7559   assert(N->getValueType(0) == MVT::i64 &&
7560          "AtomicCmpSwap on types less than 64 should be legal");
7561   SDValue Ops[] = {N->getOperand(1),
7562                    createGPRPairNode(DAG, N->getOperand(2)),
7563                    createGPRPairNode(DAG, N->getOperand(3)),
7564                    N->getOperand(0)};
7565   SDNode *CmpSwap = DAG.getMachineNode(
7566       ARM::CMP_SWAP_64, SDLoc(N),
7567       DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);
7568 
7569   MachineFunction &MF = DAG.getMachineFunction();
7570   MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1);
7571   MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
7572   cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
7573 
7574   Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_0, SDLoc(N), MVT::i32,
7575                                                SDValue(CmpSwap, 0)));
7576   Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_1, SDLoc(N), MVT::i32,
7577                                                SDValue(CmpSwap, 0)));
7578   Results.push_back(SDValue(CmpSwap, 2));
7579 }
7580 
7581 static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget,
7582                           SelectionDAG &DAG) {
7583   const auto &TLI = DAG.getTargetLoweringInfo();
7584 
7585   assert(Subtarget.getTargetTriple().isOSMSVCRT() &&
7586          "Custom lowering is MSVCRT specific!");
7587 
7588   SDLoc dl(Op);
7589   SDValue Val = Op.getOperand(0);
7590   MVT Ty = Val->getSimpleValueType(0);
7591   SDValue Exponent = DAG.getNode(ISD::SINT_TO_FP, dl, Ty, Op.getOperand(1));
7592   SDValue Callee = DAG.getExternalSymbol(Ty == MVT::f32 ? "powf" : "pow",
7593                                          TLI.getPointerTy(DAG.getDataLayout()));
7594 
7595   TargetLowering::ArgListTy Args;
7596   TargetLowering::ArgListEntry Entry;
7597 
7598   Entry.Node = Val;
7599   Entry.Ty = Val.getValueType().getTypeForEVT(*DAG.getContext());
7600   Entry.IsZExt = true;
7601   Args.push_back(Entry);
7602 
7603   Entry.Node = Exponent;
7604   Entry.Ty = Exponent.getValueType().getTypeForEVT(*DAG.getContext());
7605   Entry.IsZExt = true;
7606   Args.push_back(Entry);
7607 
7608   Type *LCRTy = Val.getValueType().getTypeForEVT(*DAG.getContext());
7609 
7610   // In the in-chain to the call is the entry node  If we are emitting a
7611   // tailcall, the chain will be mutated if the node has a non-entry input
7612   // chain.
7613   SDValue InChain = DAG.getEntryNode();
7614   SDValue TCChain = InChain;
7615 
7616   const auto *F = DAG.getMachineFunction().getFunction();
7617   bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
7618               F->getReturnType() == LCRTy;
7619   if (IsTC)
7620     InChain = TCChain;
7621 
7622   TargetLowering::CallLoweringInfo CLI(DAG);
7623   CLI.setDebugLoc(dl)
7624       .setChain(InChain)
7625       .setCallee(CallingConv::ARM_AAPCS_VFP, LCRTy, Callee, std::move(Args))
7626       .setTailCall(IsTC);
7627   std::pair<SDValue, SDValue> CI = TLI.LowerCallTo(CLI);
7628 
7629   // Return the chain (the DAG root) if it is a tail call
7630   return !CI.second.getNode() ? DAG.getRoot() : CI.first;
7631 }
7632 
7633 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
7634   switch (Op.getOpcode()) {
7635   default: llvm_unreachable("Don't know how to custom lower this!");
7636   case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
7637   case ISD::ConstantPool:
7638     if (Subtarget->genExecuteOnly())
7639       llvm_unreachable("execute-only should not generate constant pools");
7640     return LowerConstantPool(Op, DAG);
7641   case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
7642   case ISD::GlobalAddress:
7643     switch (Subtarget->getTargetTriple().getObjectFormat()) {
7644     default: llvm_unreachable("unknown object format");
7645     case Triple::COFF:
7646       return LowerGlobalAddressWindows(Op, DAG);
7647     case Triple::ELF:
7648       return LowerGlobalAddressELF(Op, DAG);
7649     case Triple::MachO:
7650       return LowerGlobalAddressDarwin(Op, DAG);
7651     }
7652   case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
7653   case ISD::SELECT:        return LowerSELECT(Op, DAG);
7654   case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
7655   case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
7656   case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
7657   case ISD::VASTART:       return LowerVASTART(Op, DAG);
7658   case ISD::ATOMIC_FENCE:  return LowerATOMIC_FENCE(Op, DAG, Subtarget);
7659   case ISD::PREFETCH:      return LowerPREFETCH(Op, DAG, Subtarget);
7660   case ISD::SINT_TO_FP:
7661   case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
7662   case ISD::FP_TO_SINT:
7663   case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
7664   case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
7665   case ISD::RETURNADDR:    return LowerRETURNADDR(Op, DAG);
7666   case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
7667   case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
7668   case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
7669   case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
7670   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
7671                                                                Subtarget);
7672   case ISD::BITCAST:       return ExpandBITCAST(Op.getNode(), DAG);
7673   case ISD::SHL:
7674   case ISD::SRL:
7675   case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
7676   case ISD::SREM:          return LowerREM(Op.getNode(), DAG);
7677   case ISD::UREM:          return LowerREM(Op.getNode(), DAG);
7678   case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
7679   case ISD::SRL_PARTS:
7680   case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
7681   case ISD::CTTZ:
7682   case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
7683   case ISD::CTPOP:         return LowerCTPOP(Op.getNode(), DAG, Subtarget);
7684   case ISD::SETCC:         return LowerVSETCC(Op, DAG);
7685   case ISD::SETCCE:        return LowerSETCCE(Op, DAG);
7686   case ISD::ConstantFP:    return LowerConstantFP(Op, DAG, Subtarget);
7687   case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG, Subtarget);
7688   case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
7689   case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
7690   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
7691   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
7692   case ISD::FLT_ROUNDS_:   return LowerFLT_ROUNDS_(Op, DAG);
7693   case ISD::MUL:           return LowerMUL(Op, DAG);
7694   case ISD::SDIV:
7695     if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
7696       return LowerDIV_Windows(Op, DAG, /* Signed */ true);
7697     return LowerSDIV(Op, DAG);
7698   case ISD::UDIV:
7699     if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
7700       return LowerDIV_Windows(Op, DAG, /* Signed */ false);
7701     return LowerUDIV(Op, DAG);
7702   case ISD::ADDC:
7703   case ISD::ADDE:
7704   case ISD::SUBC:
7705   case ISD::SUBE:          return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
7706   case ISD::SADDO:
7707   case ISD::UADDO:
7708   case ISD::SSUBO:
7709   case ISD::USUBO:
7710     return LowerXALUO(Op, DAG);
7711   case ISD::ATOMIC_LOAD:
7712   case ISD::ATOMIC_STORE:  return LowerAtomicLoadStore(Op, DAG);
7713   case ISD::FSINCOS:       return LowerFSINCOS(Op, DAG);
7714   case ISD::SDIVREM:
7715   case ISD::UDIVREM:       return LowerDivRem(Op, DAG);
7716   case ISD::DYNAMIC_STACKALLOC:
7717     if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
7718       return LowerDYNAMIC_STACKALLOC(Op, DAG);
7719     llvm_unreachable("Don't know how to custom lower this!");
7720   case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
7721   case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
7722   case ISD::FPOWI: return LowerFPOWI(Op, *Subtarget, DAG);
7723   case ARMISD::WIN__DBZCHK: return SDValue();
7724   }
7725 }
7726 
7727 static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
7728                                  SelectionDAG &DAG) {
7729   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
7730   unsigned Opc = 0;
7731   if (IntNo == Intrinsic::arm_smlald)
7732     Opc = ARMISD::SMLALD;
7733   else if (IntNo == Intrinsic::arm_smlaldx)
7734     Opc = ARMISD::SMLALDX;
7735   else if (IntNo == Intrinsic::arm_smlsld)
7736     Opc = ARMISD::SMLSLD;
7737   else if (IntNo == Intrinsic::arm_smlsldx)
7738     Opc = ARMISD::SMLSLDX;
7739   else
7740     return;
7741 
7742   SDLoc dl(N);
7743   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
7744                            N->getOperand(3),
7745                            DAG.getConstant(0, dl, MVT::i32));
7746   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
7747                            N->getOperand(3),
7748                            DAG.getConstant(1, dl, MVT::i32));
7749 
7750   SDValue LongMul = DAG.getNode(Opc, dl,
7751                                 DAG.getVTList(MVT::i32, MVT::i32),
7752                                 N->getOperand(1), N->getOperand(2),
7753                                 Lo, Hi);
7754   Results.push_back(LongMul.getValue(0));
7755   Results.push_back(LongMul.getValue(1));
7756 }
7757 
7758 /// ReplaceNodeResults - Replace the results of node with an illegal result
7759 /// type with new values built out of custom code.
7760 void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
7761                                            SmallVectorImpl<SDValue> &Results,
7762                                            SelectionDAG &DAG) const {
7763   SDValue Res;
7764   switch (N->getOpcode()) {
7765   default:
7766     llvm_unreachable("Don't know how to custom expand this!");
7767   case ISD::READ_REGISTER:
7768     ExpandREAD_REGISTER(N, Results, DAG);
7769     break;
7770   case ISD::BITCAST:
7771     Res = ExpandBITCAST(N, DAG);
7772     break;
7773   case ISD::SRL:
7774   case ISD::SRA:
7775     Res = Expand64BitShift(N, DAG, Subtarget);
7776     break;
7777   case ISD::SREM:
7778   case ISD::UREM:
7779     Res = LowerREM(N, DAG);
7780     break;
7781   case ISD::SDIVREM:
7782   case ISD::UDIVREM:
7783     Res = LowerDivRem(SDValue(N, 0), DAG);
7784     assert(Res.getNumOperands() == 2 && "DivRem needs two values");
7785     Results.push_back(Res.getValue(0));
7786     Results.push_back(Res.getValue(1));
7787     return;
7788   case ISD::READCYCLECOUNTER:
7789     ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
7790     return;
7791   case ISD::UDIV:
7792   case ISD::SDIV:
7793     assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
7794     return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
7795                              Results);
7796   case ISD::ATOMIC_CMP_SWAP:
7797     ReplaceCMP_SWAP_64Results(N, Results, DAG);
7798     return;
7799   case ISD::INTRINSIC_WO_CHAIN:
7800     return ReplaceLongIntrinsic(N, Results, DAG);
7801   }
7802   if (Res.getNode())
7803     Results.push_back(Res);
7804 }
7805 
7806 //===----------------------------------------------------------------------===//
7807 //                           ARM Scheduler Hooks
7808 //===----------------------------------------------------------------------===//
7809 
7810 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
7811 /// registers the function context.
7812 void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
7813                                                MachineBasicBlock *MBB,
7814                                                MachineBasicBlock *DispatchBB,
7815                                                int FI) const {
7816   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
7817          "ROPI/RWPI not currently supported with SjLj");
7818   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
7819   DebugLoc dl = MI.getDebugLoc();
7820   MachineFunction *MF = MBB->getParent();
7821   MachineRegisterInfo *MRI = &MF->getRegInfo();
7822   MachineConstantPool *MCP = MF->getConstantPool();
7823   ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
7824   const Function *F = MF->getFunction();
7825 
7826   bool isThumb = Subtarget->isThumb();
7827   bool isThumb2 = Subtarget->isThumb2();
7828 
7829   unsigned PCLabelId = AFI->createPICLabelUId();
7830   unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
7831   ARMConstantPoolValue *CPV =
7832     ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj);
7833   unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);
7834 
7835   const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
7836                                            : &ARM::GPRRegClass;
7837 
7838   // Grab constant pool and fixed stack memory operands.
7839   MachineMemOperand *CPMMO =
7840       MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
7841                                MachineMemOperand::MOLoad, 4, 4);
7842 
7843   MachineMemOperand *FIMMOSt =
7844       MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
7845                                MachineMemOperand::MOStore, 4, 4);
7846 
7847   // Load the address of the dispatch MBB into the jump buffer.
7848   if (isThumb2) {
7849     // Incoming value: jbuf
7850     //   ldr.n  r5, LCPI1_1
7851     //   orr    r5, r5, #1
7852     //   add    r5, pc
7853     //   str    r5, [$jbuf, #+4] ; &jbuf[1]
7854     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7855     BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
7856         .addConstantPoolIndex(CPI)
7857         .addMemOperand(CPMMO)
7858         .add(predOps(ARMCC::AL));
7859     // Set the low bit because of thumb mode.
7860     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7861     BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
7862         .addReg(NewVReg1, RegState::Kill)
7863         .addImm(0x01)
7864         .add(predOps(ARMCC::AL))
7865         .add(condCodeOp());
7866     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
7867     BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
7868       .addReg(NewVReg2, RegState::Kill)
7869       .addImm(PCLabelId);
7870     BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
7871         .addReg(NewVReg3, RegState::Kill)
7872         .addFrameIndex(FI)
7873         .addImm(36) // &jbuf[1] :: pc
7874         .addMemOperand(FIMMOSt)
7875         .add(predOps(ARMCC::AL));
7876   } else if (isThumb) {
7877     // Incoming value: jbuf
7878     //   ldr.n  r1, LCPI1_4
7879     //   add    r1, pc
7880     //   mov    r2, #1
7881     //   orrs   r1, r2
7882     //   add    r2, $jbuf, #+4 ; &jbuf[1]
7883     //   str    r1, [r2]
7884     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7885     BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
7886         .addConstantPoolIndex(CPI)
7887         .addMemOperand(CPMMO)
7888         .add(predOps(ARMCC::AL));
7889     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7890     BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
7891       .addReg(NewVReg1, RegState::Kill)
7892       .addImm(PCLabelId);
7893     // Set the low bit because of thumb mode.
7894     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
7895     BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
7896         .addReg(ARM::CPSR, RegState::Define)
7897         .addImm(1)
7898         .add(predOps(ARMCC::AL));
7899     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
7900     BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
7901         .addReg(ARM::CPSR, RegState::Define)
7902         .addReg(NewVReg2, RegState::Kill)
7903         .addReg(NewVReg3, RegState::Kill)
7904         .add(predOps(ARMCC::AL));
7905     unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
7906     BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
7907             .addFrameIndex(FI)
7908             .addImm(36); // &jbuf[1] :: pc
7909     BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
7910         .addReg(NewVReg4, RegState::Kill)
7911         .addReg(NewVReg5, RegState::Kill)
7912         .addImm(0)
7913         .addMemOperand(FIMMOSt)
7914         .add(predOps(ARMCC::AL));
7915   } else {
7916     // Incoming value: jbuf
7917     //   ldr  r1, LCPI1_1
7918     //   add  r1, pc, r1
7919     //   str  r1, [$jbuf, #+4] ; &jbuf[1]
7920     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7921     BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
7922         .addConstantPoolIndex(CPI)
7923         .addImm(0)
7924         .addMemOperand(CPMMO)
7925         .add(predOps(ARMCC::AL));
7926     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7927     BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
7928         .addReg(NewVReg1, RegState::Kill)
7929         .addImm(PCLabelId)
7930         .add(predOps(ARMCC::AL));
7931     BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
7932         .addReg(NewVReg2, RegState::Kill)
7933         .addFrameIndex(FI)
7934         .addImm(36) // &jbuf[1] :: pc
7935         .addMemOperand(FIMMOSt)
7936         .add(predOps(ARMCC::AL));
7937   }
7938 }
7939 
7940 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
7941                                               MachineBasicBlock *MBB) const {
7942   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
7943   DebugLoc dl = MI.getDebugLoc();
7944   MachineFunction *MF = MBB->getParent();
7945   MachineRegisterInfo *MRI = &MF->getRegInfo();
7946   MachineFrameInfo &MFI = MF->getFrameInfo();
7947   int FI = MFI.getFunctionContextIndex();
7948 
7949   const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
7950                                                         : &ARM::GPRnopcRegClass;
7951 
7952   // Get a mapping of the call site numbers to all of the landing pads they're
7953   // associated with.
7954   DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad;
7955   unsigned MaxCSNum = 0;
7956   for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
7957        ++BB) {
7958     if (!BB->isEHPad()) continue;
7959 
7960     // FIXME: We should assert that the EH_LABEL is the first MI in the landing
7961     // pad.
7962     for (MachineBasicBlock::iterator
7963            II = BB->begin(), IE = BB->end(); II != IE; ++II) {
7964       if (!II->isEHLabel()) continue;
7965 
7966       MCSymbol *Sym = II->getOperand(0).getMCSymbol();
7967       if (!MF->hasCallSiteLandingPad(Sym)) continue;
7968 
7969       SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
7970       for (SmallVectorImpl<unsigned>::iterator
7971              CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
7972            CSI != CSE; ++CSI) {
7973         CallSiteNumToLPad[*CSI].push_back(&*BB);
7974         MaxCSNum = std::max(MaxCSNum, *CSI);
7975       }
7976       break;
7977     }
7978   }
7979 
7980   // Get an ordered list of the machine basic blocks for the jump table.
7981   std::vector<MachineBasicBlock*> LPadList;
7982   SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
7983   LPadList.reserve(CallSiteNumToLPad.size());
7984   for (unsigned I = 1; I <= MaxCSNum; ++I) {
7985     SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
7986     for (SmallVectorImpl<MachineBasicBlock*>::iterator
7987            II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
7988       LPadList.push_back(*II);
7989       InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
7990     }
7991   }
7992 
7993   assert(!LPadList.empty() &&
7994          "No landing pad destinations for the dispatch jump table!");
7995 
7996   // Create the jump table and associated information.
7997   MachineJumpTableInfo *JTI =
7998     MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
7999   unsigned MJTI = JTI->createJumpTableIndex(LPadList);
8000 
8001   // Create the MBBs for the dispatch code.
8002 
8003   // Shove the dispatch's address into the return slot in the function context.
8004   MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
8005   DispatchBB->setIsEHPad();
8006 
8007   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
8008   unsigned trap_opcode;
8009   if (Subtarget->isThumb())
8010     trap_opcode = ARM::tTRAP;
8011   else
8012     trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
8013 
8014   BuildMI(TrapBB, dl, TII->get(trap_opcode));
8015   DispatchBB->addSuccessor(TrapBB);
8016 
8017   MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
8018   DispatchBB->addSuccessor(DispContBB);
8019 
8020   // Insert and MBBs.
8021   MF->insert(MF->end(), DispatchBB);
8022   MF->insert(MF->end(), DispContBB);
8023   MF->insert(MF->end(), TrapBB);
8024 
8025   // Insert code into the entry block that creates and registers the function
8026   // context.
8027   SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
8028 
8029   MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
8030       MachinePointerInfo::getFixedStack(*MF, FI),
8031       MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4);
8032 
8033   MachineInstrBuilder MIB;
8034   MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));
8035 
8036   const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
8037   const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
8038 
8039   // Add a register mask with no preserved registers.  This results in all
8040   // registers being marked as clobbered. This can't work if the dispatch block
8041   // is in a Thumb1 function and is linked with ARM code which uses the FP
8042   // registers, as there is no way to preserve the FP registers in Thumb1 mode.
8043   MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));
8044 
8045   bool IsPositionIndependent = isPositionIndependent();
8046   unsigned NumLPads = LPadList.size();
8047   if (Subtarget->isThumb2()) {
8048     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8049     BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
8050         .addFrameIndex(FI)
8051         .addImm(4)
8052         .addMemOperand(FIMMOLd)
8053         .add(predOps(ARMCC::AL));
8054 
8055     if (NumLPads < 256) {
8056       BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
8057           .addReg(NewVReg1)
8058           .addImm(LPadList.size())
8059           .add(predOps(ARMCC::AL));
8060     } else {
8061       unsigned VReg1 = MRI->createVirtualRegister(TRC);
8062       BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
8063           .addImm(NumLPads & 0xFFFF)
8064           .add(predOps(ARMCC::AL));
8065 
8066       unsigned VReg2 = VReg1;
8067       if ((NumLPads & 0xFFFF0000) != 0) {
8068         VReg2 = MRI->createVirtualRegister(TRC);
8069         BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
8070             .addReg(VReg1)
8071             .addImm(NumLPads >> 16)
8072             .add(predOps(ARMCC::AL));
8073       }
8074 
8075       BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
8076           .addReg(NewVReg1)
8077           .addReg(VReg2)
8078           .add(predOps(ARMCC::AL));
8079     }
8080 
8081     BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
8082       .addMBB(TrapBB)
8083       .addImm(ARMCC::HI)
8084       .addReg(ARM::CPSR);
8085 
8086     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8087     BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3)
8088         .addJumpTableIndex(MJTI)
8089         .add(predOps(ARMCC::AL));
8090 
8091     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8092     BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
8093         .addReg(NewVReg3, RegState::Kill)
8094         .addReg(NewVReg1)
8095         .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
8096         .add(predOps(ARMCC::AL))
8097         .add(condCodeOp());
8098 
8099     BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
8100       .addReg(NewVReg4, RegState::Kill)
8101       .addReg(NewVReg1)
8102       .addJumpTableIndex(MJTI);
8103   } else if (Subtarget->isThumb()) {
8104     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8105     BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
8106         .addFrameIndex(FI)
8107         .addImm(1)
8108         .addMemOperand(FIMMOLd)
8109         .add(predOps(ARMCC::AL));
8110 
8111     if (NumLPads < 256) {
8112       BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
8113           .addReg(NewVReg1)
8114           .addImm(NumLPads)
8115           .add(predOps(ARMCC::AL));
8116     } else {
8117       MachineConstantPool *ConstantPool = MF->getConstantPool();
8118       Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8119       const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
8120 
8121       // MachineConstantPool wants an explicit alignment.
8122       unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8123       if (Align == 0)
8124         Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8125       unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8126 
8127       unsigned VReg1 = MRI->createVirtualRegister(TRC);
8128       BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
8129           .addReg(VReg1, RegState::Define)
8130           .addConstantPoolIndex(Idx)
8131           .add(predOps(ARMCC::AL));
8132       BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
8133           .addReg(NewVReg1)
8134           .addReg(VReg1)
8135           .add(predOps(ARMCC::AL));
8136     }
8137 
8138     BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
8139       .addMBB(TrapBB)
8140       .addImm(ARMCC::HI)
8141       .addReg(ARM::CPSR);
8142 
8143     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
8144     BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
8145         .addReg(ARM::CPSR, RegState::Define)
8146         .addReg(NewVReg1)
8147         .addImm(2)
8148         .add(predOps(ARMCC::AL));
8149 
8150     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8151     BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
8152         .addJumpTableIndex(MJTI)
8153         .add(predOps(ARMCC::AL));
8154 
8155     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8156     BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
8157         .addReg(ARM::CPSR, RegState::Define)
8158         .addReg(NewVReg2, RegState::Kill)
8159         .addReg(NewVReg3)
8160         .add(predOps(ARMCC::AL));
8161 
8162     MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
8163         MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
8164 
8165     unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
8166     BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
8167         .addReg(NewVReg4, RegState::Kill)
8168         .addImm(0)
8169         .addMemOperand(JTMMOLd)
8170         .add(predOps(ARMCC::AL));
8171 
8172     unsigned NewVReg6 = NewVReg5;
8173     if (IsPositionIndependent) {
8174       NewVReg6 = MRI->createVirtualRegister(TRC);
8175       BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
8176           .addReg(ARM::CPSR, RegState::Define)
8177           .addReg(NewVReg5, RegState::Kill)
8178           .addReg(NewVReg3)
8179           .add(predOps(ARMCC::AL));
8180     }
8181 
8182     BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
8183       .addReg(NewVReg6, RegState::Kill)
8184       .addJumpTableIndex(MJTI);
8185   } else {
8186     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8187     BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
8188         .addFrameIndex(FI)
8189         .addImm(4)
8190         .addMemOperand(FIMMOLd)
8191         .add(predOps(ARMCC::AL));
8192 
8193     if (NumLPads < 256) {
8194       BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
8195           .addReg(NewVReg1)
8196           .addImm(NumLPads)
8197           .add(predOps(ARMCC::AL));
8198     } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
8199       unsigned VReg1 = MRI->createVirtualRegister(TRC);
8200       BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
8201           .addImm(NumLPads & 0xFFFF)
8202           .add(predOps(ARMCC::AL));
8203 
8204       unsigned VReg2 = VReg1;
8205       if ((NumLPads & 0xFFFF0000) != 0) {
8206         VReg2 = MRI->createVirtualRegister(TRC);
8207         BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
8208             .addReg(VReg1)
8209             .addImm(NumLPads >> 16)
8210             .add(predOps(ARMCC::AL));
8211       }
8212 
8213       BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
8214           .addReg(NewVReg1)
8215           .addReg(VReg2)
8216           .add(predOps(ARMCC::AL));
8217     } else {
8218       MachineConstantPool *ConstantPool = MF->getConstantPool();
8219       Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8220       const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
8221 
8222       // MachineConstantPool wants an explicit alignment.
8223       unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8224       if (Align == 0)
8225         Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8226       unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8227 
8228       unsigned VReg1 = MRI->createVirtualRegister(TRC);
8229       BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
8230           .addReg(VReg1, RegState::Define)
8231           .addConstantPoolIndex(Idx)
8232           .addImm(0)
8233           .add(predOps(ARMCC::AL));
8234       BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
8235           .addReg(NewVReg1)
8236           .addReg(VReg1, RegState::Kill)
8237           .add(predOps(ARMCC::AL));
8238     }
8239 
8240     BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
8241       .addMBB(TrapBB)
8242       .addImm(ARMCC::HI)
8243       .addReg(ARM::CPSR);
8244 
8245     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8246     BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
8247         .addReg(NewVReg1)
8248         .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
8249         .add(predOps(ARMCC::AL))
8250         .add(condCodeOp());
8251     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8252     BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
8253         .addJumpTableIndex(MJTI)
8254         .add(predOps(ARMCC::AL));
8255 
8256     MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
8257         MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
8258     unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
8259     BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
8260         .addReg(NewVReg3, RegState::Kill)
8261         .addReg(NewVReg4)
8262         .addImm(0)
8263         .addMemOperand(JTMMOLd)
8264         .add(predOps(ARMCC::AL));
8265 
8266     if (IsPositionIndependent) {
8267       BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
8268         .addReg(NewVReg5, RegState::Kill)
8269         .addReg(NewVReg4)
8270         .addJumpTableIndex(MJTI);
8271     } else {
8272       BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
8273         .addReg(NewVReg5, RegState::Kill)
8274         .addJumpTableIndex(MJTI);
8275     }
8276   }
8277 
8278   // Add the jump table entries as successors to the MBB.
8279   SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
8280   for (std::vector<MachineBasicBlock*>::iterator
8281          I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
8282     MachineBasicBlock *CurMBB = *I;
8283     if (SeenMBBs.insert(CurMBB).second)
8284       DispContBB->addSuccessor(CurMBB);
8285   }
8286 
8287   // N.B. the order the invoke BBs are processed in doesn't matter here.
8288   const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
8289   SmallVector<MachineBasicBlock*, 64> MBBLPads;
8290   for (MachineBasicBlock *BB : InvokeBBs) {
8291 
8292     // Remove the landing pad successor from the invoke block and replace it
8293     // with the new dispatch block.
8294     SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
8295                                                   BB->succ_end());
8296     while (!Successors.empty()) {
8297       MachineBasicBlock *SMBB = Successors.pop_back_val();
8298       if (SMBB->isEHPad()) {
8299         BB->removeSuccessor(SMBB);
8300         MBBLPads.push_back(SMBB);
8301       }
8302     }
8303 
8304     BB->addSuccessor(DispatchBB, BranchProbability::getZero());
8305     BB->normalizeSuccProbs();
8306 
8307     // Find the invoke call and mark all of the callee-saved registers as
8308     // 'implicit defined' so that they're spilled. This prevents code from
8309     // moving instructions to before the EH block, where they will never be
8310     // executed.
8311     for (MachineBasicBlock::reverse_iterator
8312            II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
8313       if (!II->isCall()) continue;
8314 
8315       DenseMap<unsigned, bool> DefRegs;
8316       for (MachineInstr::mop_iterator
8317              OI = II->operands_begin(), OE = II->operands_end();
8318            OI != OE; ++OI) {
8319         if (!OI->isReg()) continue;
8320         DefRegs[OI->getReg()] = true;
8321       }
8322 
8323       MachineInstrBuilder MIB(*MF, &*II);
8324 
8325       for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
8326         unsigned Reg = SavedRegs[i];
8327         if (Subtarget->isThumb2() &&
8328             !ARM::tGPRRegClass.contains(Reg) &&
8329             !ARM::hGPRRegClass.contains(Reg))
8330           continue;
8331         if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
8332           continue;
8333         if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
8334           continue;
8335         if (!DefRegs[Reg])
8336           MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
8337       }
8338 
8339       break;
8340     }
8341   }
8342 
8343   // Mark all former landing pads as non-landing pads. The dispatch is the only
8344   // landing pad now.
8345   for (SmallVectorImpl<MachineBasicBlock*>::iterator
8346          I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
8347     (*I)->setIsEHPad(false);
8348 
8349   // The instruction is gone now.
8350   MI.eraseFromParent();
8351 }
8352 
8353 static
8354 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
8355   for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
8356        E = MBB->succ_end(); I != E; ++I)
8357     if (*I != Succ)
8358       return *I;
8359   llvm_unreachable("Expecting a BB with two successors!");
8360 }
8361 
8362 /// Return the load opcode for a given load size. If load size >= 8,
8363 /// neon opcode will be returned.
8364 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
8365   if (LdSize >= 8)
8366     return LdSize == 16 ? ARM::VLD1q32wb_fixed
8367                         : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
8368   if (IsThumb1)
8369     return LdSize == 4 ? ARM::tLDRi
8370                        : LdSize == 2 ? ARM::tLDRHi
8371                                      : LdSize == 1 ? ARM::tLDRBi : 0;
8372   if (IsThumb2)
8373     return LdSize == 4 ? ARM::t2LDR_POST
8374                        : LdSize == 2 ? ARM::t2LDRH_POST
8375                                      : LdSize == 1 ? ARM::t2LDRB_POST : 0;
8376   return LdSize == 4 ? ARM::LDR_POST_IMM
8377                      : LdSize == 2 ? ARM::LDRH_POST
8378                                    : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
8379 }
8380 
8381 /// Return the store opcode for a given store size. If store size >= 8,
8382 /// neon opcode will be returned.
8383 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
8384   if (StSize >= 8)
8385     return StSize == 16 ? ARM::VST1q32wb_fixed
8386                         : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
8387   if (IsThumb1)
8388     return StSize == 4 ? ARM::tSTRi
8389                        : StSize == 2 ? ARM::tSTRHi
8390                                      : StSize == 1 ? ARM::tSTRBi : 0;
8391   if (IsThumb2)
8392     return StSize == 4 ? ARM::t2STR_POST
8393                        : StSize == 2 ? ARM::t2STRH_POST
8394                                      : StSize == 1 ? ARM::t2STRB_POST : 0;
8395   return StSize == 4 ? ARM::STR_POST_IMM
8396                      : StSize == 2 ? ARM::STRH_POST
8397                                    : StSize == 1 ? ARM::STRB_POST_IMM : 0;
8398 }
8399 
8400 /// Emit a post-increment load operation with given size. The instructions
8401 /// will be added to BB at Pos.
8402 static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
8403                        const TargetInstrInfo *TII, const DebugLoc &dl,
8404                        unsigned LdSize, unsigned Data, unsigned AddrIn,
8405                        unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
8406   unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
8407   assert(LdOpc != 0 && "Should have a load opcode");
8408   if (LdSize >= 8) {
8409     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8410         .addReg(AddrOut, RegState::Define)
8411         .addReg(AddrIn)
8412         .addImm(0)
8413         .add(predOps(ARMCC::AL));
8414   } else if (IsThumb1) {
8415     // load + update AddrIn
8416     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8417         .addReg(AddrIn)
8418         .addImm(0)
8419         .add(predOps(ARMCC::AL));
8420     BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
8421         .add(t1CondCodeOp())
8422         .addReg(AddrIn)
8423         .addImm(LdSize)
8424         .add(predOps(ARMCC::AL));
8425   } else if (IsThumb2) {
8426     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8427         .addReg(AddrOut, RegState::Define)
8428         .addReg(AddrIn)
8429         .addImm(LdSize)
8430         .add(predOps(ARMCC::AL));
8431   } else { // arm
8432     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8433         .addReg(AddrOut, RegState::Define)
8434         .addReg(AddrIn)
8435         .addReg(0)
8436         .addImm(LdSize)
8437         .add(predOps(ARMCC::AL));
8438   }
8439 }
8440 
8441 /// Emit a post-increment store operation with given size. The instructions
8442 /// will be added to BB at Pos.
8443 static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
8444                        const TargetInstrInfo *TII, const DebugLoc &dl,
8445                        unsigned StSize, unsigned Data, unsigned AddrIn,
8446                        unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
8447   unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
8448   assert(StOpc != 0 && "Should have a store opcode");
8449   if (StSize >= 8) {
8450     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8451         .addReg(AddrIn)
8452         .addImm(0)
8453         .addReg(Data)
8454         .add(predOps(ARMCC::AL));
8455   } else if (IsThumb1) {
8456     // store + update AddrIn
8457     BuildMI(*BB, Pos, dl, TII->get(StOpc))
8458         .addReg(Data)
8459         .addReg(AddrIn)
8460         .addImm(0)
8461         .add(predOps(ARMCC::AL));
8462     BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
8463         .add(t1CondCodeOp())
8464         .addReg(AddrIn)
8465         .addImm(StSize)
8466         .add(predOps(ARMCC::AL));
8467   } else if (IsThumb2) {
8468     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8469         .addReg(Data)
8470         .addReg(AddrIn)
8471         .addImm(StSize)
8472         .add(predOps(ARMCC::AL));
8473   } else { // arm
8474     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8475         .addReg(Data)
8476         .addReg(AddrIn)
8477         .addReg(0)
8478         .addImm(StSize)
8479         .add(predOps(ARMCC::AL));
8480   }
8481 }
8482 
8483 MachineBasicBlock *
8484 ARMTargetLowering::EmitStructByval(MachineInstr &MI,
8485                                    MachineBasicBlock *BB) const {
8486   // This pseudo instruction has 3 operands: dst, src, size
8487   // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
8488   // Otherwise, we will generate unrolled scalar copies.
8489   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8490   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8491   MachineFunction::iterator It = ++BB->getIterator();
8492 
8493   unsigned dest = MI.getOperand(0).getReg();
8494   unsigned src = MI.getOperand(1).getReg();
8495   unsigned SizeVal = MI.getOperand(2).getImm();
8496   unsigned Align = MI.getOperand(3).getImm();
8497   DebugLoc dl = MI.getDebugLoc();
8498 
8499   MachineFunction *MF = BB->getParent();
8500   MachineRegisterInfo &MRI = MF->getRegInfo();
8501   unsigned UnitSize = 0;
8502   const TargetRegisterClass *TRC = nullptr;
8503   const TargetRegisterClass *VecTRC = nullptr;
8504 
8505   bool IsThumb1 = Subtarget->isThumb1Only();
8506   bool IsThumb2 = Subtarget->isThumb2();
8507   bool IsThumb = Subtarget->isThumb();
8508 
8509   if (Align & 1) {
8510     UnitSize = 1;
8511   } else if (Align & 2) {
8512     UnitSize = 2;
8513   } else {
8514     // Check whether we can use NEON instructions.
8515     if (!MF->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) &&
8516         Subtarget->hasNEON()) {
8517       if ((Align % 16 == 0) && SizeVal >= 16)
8518         UnitSize = 16;
8519       else if ((Align % 8 == 0) && SizeVal >= 8)
8520         UnitSize = 8;
8521     }
8522     // Can't use NEON instructions.
8523     if (UnitSize == 0)
8524       UnitSize = 4;
8525   }
8526 
8527   // Select the correct opcode and register class for unit size load/store
8528   bool IsNeon = UnitSize >= 8;
8529   TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
8530   if (IsNeon)
8531     VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
8532                             : UnitSize == 8 ? &ARM::DPRRegClass
8533                                             : nullptr;
8534 
8535   unsigned BytesLeft = SizeVal % UnitSize;
8536   unsigned LoopSize = SizeVal - BytesLeft;
8537 
8538   if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
8539     // Use LDR and STR to copy.
8540     // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
8541     // [destOut] = STR_POST(scratch, destIn, UnitSize)
8542     unsigned srcIn = src;
8543     unsigned destIn = dest;
8544     for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
8545       unsigned srcOut = MRI.createVirtualRegister(TRC);
8546       unsigned destOut = MRI.createVirtualRegister(TRC);
8547       unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
8548       emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
8549                  IsThumb1, IsThumb2);
8550       emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
8551                  IsThumb1, IsThumb2);
8552       srcIn = srcOut;
8553       destIn = destOut;
8554     }
8555 
8556     // Handle the leftover bytes with LDRB and STRB.
8557     // [scratch, srcOut] = LDRB_POST(srcIn, 1)
8558     // [destOut] = STRB_POST(scratch, destIn, 1)
8559     for (unsigned i = 0; i < BytesLeft; i++) {
8560       unsigned srcOut = MRI.createVirtualRegister(TRC);
8561       unsigned destOut = MRI.createVirtualRegister(TRC);
8562       unsigned scratch = MRI.createVirtualRegister(TRC);
8563       emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
8564                  IsThumb1, IsThumb2);
8565       emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
8566                  IsThumb1, IsThumb2);
8567       srcIn = srcOut;
8568       destIn = destOut;
8569     }
8570     MI.eraseFromParent(); // The instruction is gone now.
8571     return BB;
8572   }
8573 
8574   // Expand the pseudo op to a loop.
8575   // thisMBB:
8576   //   ...
8577   //   movw varEnd, # --> with thumb2
8578   //   movt varEnd, #
8579   //   ldrcp varEnd, idx --> without thumb2
8580   //   fallthrough --> loopMBB
8581   // loopMBB:
8582   //   PHI varPhi, varEnd, varLoop
8583   //   PHI srcPhi, src, srcLoop
8584   //   PHI destPhi, dst, destLoop
8585   //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
8586   //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
8587   //   subs varLoop, varPhi, #UnitSize
8588   //   bne loopMBB
8589   //   fallthrough --> exitMBB
8590   // exitMBB:
8591   //   epilogue to handle left-over bytes
8592   //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
8593   //   [destOut] = STRB_POST(scratch, destLoop, 1)
8594   MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
8595   MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
8596   MF->insert(It, loopMBB);
8597   MF->insert(It, exitMBB);
8598 
8599   // Transfer the remainder of BB and its successor edges to exitMBB.
8600   exitMBB->splice(exitMBB->begin(), BB,
8601                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
8602   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
8603 
8604   // Load an immediate to varEnd.
8605   unsigned varEnd = MRI.createVirtualRegister(TRC);
8606   if (Subtarget->useMovt(*MF)) {
8607     unsigned Vtmp = varEnd;
8608     if ((LoopSize & 0xFFFF0000) != 0)
8609       Vtmp = MRI.createVirtualRegister(TRC);
8610     BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
8611         .addImm(LoopSize & 0xFFFF)
8612         .add(predOps(ARMCC::AL));
8613 
8614     if ((LoopSize & 0xFFFF0000) != 0)
8615       BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
8616           .addReg(Vtmp)
8617           .addImm(LoopSize >> 16)
8618           .add(predOps(ARMCC::AL));
8619   } else {
8620     MachineConstantPool *ConstantPool = MF->getConstantPool();
8621     Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8622     const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
8623 
8624     // MachineConstantPool wants an explicit alignment.
8625     unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8626     if (Align == 0)
8627       Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8628     unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8629 
8630     if (IsThumb)
8631       BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
8632           .addReg(varEnd, RegState::Define)
8633           .addConstantPoolIndex(Idx)
8634           .add(predOps(ARMCC::AL));
8635     else
8636       BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
8637           .addReg(varEnd, RegState::Define)
8638           .addConstantPoolIndex(Idx)
8639           .addImm(0)
8640           .add(predOps(ARMCC::AL));
8641   }
8642   BB->addSuccessor(loopMBB);
8643 
8644   // Generate the loop body:
8645   //   varPhi = PHI(varLoop, varEnd)
8646   //   srcPhi = PHI(srcLoop, src)
8647   //   destPhi = PHI(destLoop, dst)
8648   MachineBasicBlock *entryBB = BB;
8649   BB = loopMBB;
8650   unsigned varLoop = MRI.createVirtualRegister(TRC);
8651   unsigned varPhi = MRI.createVirtualRegister(TRC);
8652   unsigned srcLoop = MRI.createVirtualRegister(TRC);
8653   unsigned srcPhi = MRI.createVirtualRegister(TRC);
8654   unsigned destLoop = MRI.createVirtualRegister(TRC);
8655   unsigned destPhi = MRI.createVirtualRegister(TRC);
8656 
8657   BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
8658     .addReg(varLoop).addMBB(loopMBB)
8659     .addReg(varEnd).addMBB(entryBB);
8660   BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
8661     .addReg(srcLoop).addMBB(loopMBB)
8662     .addReg(src).addMBB(entryBB);
8663   BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
8664     .addReg(destLoop).addMBB(loopMBB)
8665     .addReg(dest).addMBB(entryBB);
8666 
8667   //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
8668   //   [destLoop] = STR_POST(scratch, destPhi, UnitSiz)
8669   unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
8670   emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
8671              IsThumb1, IsThumb2);
8672   emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
8673              IsThumb1, IsThumb2);
8674 
8675   // Decrement loop variable by UnitSize.
8676   if (IsThumb1) {
8677     BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop)
8678         .add(t1CondCodeOp())
8679         .addReg(varPhi)
8680         .addImm(UnitSize)
8681         .add(predOps(ARMCC::AL));
8682   } else {
8683     MachineInstrBuilder MIB =
8684         BuildMI(*BB, BB->end(), dl,
8685                 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
8686     MIB.addReg(varPhi)
8687         .addImm(UnitSize)
8688         .add(predOps(ARMCC::AL))
8689         .add(condCodeOp());
8690     MIB->getOperand(5).setReg(ARM::CPSR);
8691     MIB->getOperand(5).setIsDef(true);
8692   }
8693   BuildMI(*BB, BB->end(), dl,
8694           TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
8695       .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
8696 
8697   // loopMBB can loop back to loopMBB or fall through to exitMBB.
8698   BB->addSuccessor(loopMBB);
8699   BB->addSuccessor(exitMBB);
8700 
8701   // Add epilogue to handle BytesLeft.
8702   BB = exitMBB;
8703   auto StartOfExit = exitMBB->begin();
8704 
8705   //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
8706   //   [destOut] = STRB_POST(scratch, destLoop, 1)
8707   unsigned srcIn = srcLoop;
8708   unsigned destIn = destLoop;
8709   for (unsigned i = 0; i < BytesLeft; i++) {
8710     unsigned srcOut = MRI.createVirtualRegister(TRC);
8711     unsigned destOut = MRI.createVirtualRegister(TRC);
8712     unsigned scratch = MRI.createVirtualRegister(TRC);
8713     emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
8714                IsThumb1, IsThumb2);
8715     emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
8716                IsThumb1, IsThumb2);
8717     srcIn = srcOut;
8718     destIn = destOut;
8719   }
8720 
8721   MI.eraseFromParent(); // The instruction is gone now.
8722   return BB;
8723 }
8724 
8725 MachineBasicBlock *
8726 ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
8727                                        MachineBasicBlock *MBB) const {
8728   const TargetMachine &TM = getTargetMachine();
8729   const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
8730   DebugLoc DL = MI.getDebugLoc();
8731 
8732   assert(Subtarget->isTargetWindows() &&
8733          "__chkstk is only supported on Windows");
8734   assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
8735 
8736   // __chkstk takes the number of words to allocate on the stack in R4, and
8737   // returns the stack adjustment in number of bytes in R4.  This will not
8738   // clober any other registers (other than the obvious lr).
8739   //
8740   // Although, technically, IP should be considered a register which may be
8741   // clobbered, the call itself will not touch it.  Windows on ARM is a pure
8742   // thumb-2 environment, so there is no interworking required.  As a result, we
8743   // do not expect a veneer to be emitted by the linker, clobbering IP.
8744   //
8745   // Each module receives its own copy of __chkstk, so no import thunk is
8746   // required, again, ensuring that IP is not clobbered.
8747   //
8748   // Finally, although some linkers may theoretically provide a trampoline for
8749   // out of range calls (which is quite common due to a 32M range limitation of
8750   // branches for Thumb), we can generate the long-call version via
8751   // -mcmodel=large, alleviating the need for the trampoline which may clobber
8752   // IP.
8753 
8754   switch (TM.getCodeModel()) {
8755   case CodeModel::Small:
8756   case CodeModel::Medium:
8757   case CodeModel::Default:
8758   case CodeModel::Kernel:
8759     BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
8760         .add(predOps(ARMCC::AL))
8761         .addExternalSymbol("__chkstk")
8762         .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
8763         .addReg(ARM::R4, RegState::Implicit | RegState::Define)
8764         .addReg(ARM::R12,
8765                 RegState::Implicit | RegState::Define | RegState::Dead);
8766     break;
8767   case CodeModel::Large:
8768   case CodeModel::JITDefault: {
8769     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
8770     unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
8771 
8772     BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
8773       .addExternalSymbol("__chkstk");
8774     BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr))
8775         .add(predOps(ARMCC::AL))
8776         .addReg(Reg, RegState::Kill)
8777         .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
8778         .addReg(ARM::R4, RegState::Implicit | RegState::Define)
8779         .addReg(ARM::R12,
8780                 RegState::Implicit | RegState::Define | RegState::Dead);
8781     break;
8782   }
8783   }
8784 
8785   BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP)
8786       .addReg(ARM::SP, RegState::Kill)
8787       .addReg(ARM::R4, RegState::Kill)
8788       .setMIFlags(MachineInstr::FrameSetup)
8789       .add(predOps(ARMCC::AL))
8790       .add(condCodeOp());
8791 
8792   MI.eraseFromParent();
8793   return MBB;
8794 }
8795 
8796 MachineBasicBlock *
8797 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
8798                                        MachineBasicBlock *MBB) const {
8799   DebugLoc DL = MI.getDebugLoc();
8800   MachineFunction *MF = MBB->getParent();
8801   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8802 
8803   MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
8804   MF->insert(++MBB->getIterator(), ContBB);
8805   ContBB->splice(ContBB->begin(), MBB,
8806                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
8807   ContBB->transferSuccessorsAndUpdatePHIs(MBB);
8808   MBB->addSuccessor(ContBB);
8809 
8810   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
8811   BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0));
8812   MF->push_back(TrapBB);
8813   MBB->addSuccessor(TrapBB);
8814 
8815   BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
8816       .addReg(MI.getOperand(0).getReg())
8817       .addImm(0)
8818       .add(predOps(ARMCC::AL));
8819   BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc))
8820       .addMBB(TrapBB)
8821       .addImm(ARMCC::EQ)
8822       .addReg(ARM::CPSR);
8823 
8824   MI.eraseFromParent();
8825   return ContBB;
8826 }
8827 
8828 MachineBasicBlock *
8829 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
8830                                                MachineBasicBlock *BB) const {
8831   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8832   DebugLoc dl = MI.getDebugLoc();
8833   bool isThumb2 = Subtarget->isThumb2();
8834   switch (MI.getOpcode()) {
8835   default: {
8836     MI.print(errs());
8837     llvm_unreachable("Unexpected instr type to insert");
8838   }
8839 
8840   // Thumb1 post-indexed loads are really just single-register LDMs.
8841   case ARM::tLDR_postidx: {
8842     BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD))
8843         .add(MI.getOperand(1))  // Rn_wb
8844         .add(MI.getOperand(2))  // Rn
8845         .add(MI.getOperand(3))  // PredImm
8846         .add(MI.getOperand(4))  // PredReg
8847         .add(MI.getOperand(0)); // Rt
8848     MI.eraseFromParent();
8849     return BB;
8850   }
8851 
8852   // The Thumb2 pre-indexed stores have the same MI operands, they just
8853   // define them differently in the .td files from the isel patterns, so
8854   // they need pseudos.
8855   case ARM::t2STR_preidx:
8856     MI.setDesc(TII->get(ARM::t2STR_PRE));
8857     return BB;
8858   case ARM::t2STRB_preidx:
8859     MI.setDesc(TII->get(ARM::t2STRB_PRE));
8860     return BB;
8861   case ARM::t2STRH_preidx:
8862     MI.setDesc(TII->get(ARM::t2STRH_PRE));
8863     return BB;
8864 
8865   case ARM::STRi_preidx:
8866   case ARM::STRBi_preidx: {
8867     unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
8868                                                          : ARM::STRB_PRE_IMM;
8869     // Decode the offset.
8870     unsigned Offset = MI.getOperand(4).getImm();
8871     bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
8872     Offset = ARM_AM::getAM2Offset(Offset);
8873     if (isSub)
8874       Offset = -Offset;
8875 
8876     MachineMemOperand *MMO = *MI.memoperands_begin();
8877     BuildMI(*BB, MI, dl, TII->get(NewOpc))
8878         .add(MI.getOperand(0)) // Rn_wb
8879         .add(MI.getOperand(1)) // Rt
8880         .add(MI.getOperand(2)) // Rn
8881         .addImm(Offset)        // offset (skip GPR==zero_reg)
8882         .add(MI.getOperand(5)) // pred
8883         .add(MI.getOperand(6))
8884         .addMemOperand(MMO);
8885     MI.eraseFromParent();
8886     return BB;
8887   }
8888   case ARM::STRr_preidx:
8889   case ARM::STRBr_preidx:
8890   case ARM::STRH_preidx: {
8891     unsigned NewOpc;
8892     switch (MI.getOpcode()) {
8893     default: llvm_unreachable("unexpected opcode!");
8894     case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
8895     case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
8896     case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
8897     }
8898     MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
8899     for (unsigned i = 0; i < MI.getNumOperands(); ++i)
8900       MIB.add(MI.getOperand(i));
8901     MI.eraseFromParent();
8902     return BB;
8903   }
8904 
8905   case ARM::tMOVCCr_pseudo: {
8906     // To "insert" a SELECT_CC instruction, we actually have to insert the
8907     // diamond control-flow pattern.  The incoming instruction knows the
8908     // destination vreg to set, the condition code register to branch on, the
8909     // true/false values to select between, and a branch opcode to use.
8910     const BasicBlock *LLVM_BB = BB->getBasicBlock();
8911     MachineFunction::iterator It = ++BB->getIterator();
8912 
8913     //  thisMBB:
8914     //  ...
8915     //   TrueVal = ...
8916     //   cmpTY ccX, r1, r2
8917     //   bCC copy1MBB
8918     //   fallthrough --> copy0MBB
8919     MachineBasicBlock *thisMBB  = BB;
8920     MachineFunction *F = BB->getParent();
8921     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
8922     MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
8923     F->insert(It, copy0MBB);
8924     F->insert(It, sinkMBB);
8925 
8926     // Transfer the remainder of BB and its successor edges to sinkMBB.
8927     sinkMBB->splice(sinkMBB->begin(), BB,
8928                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
8929     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
8930 
8931     BB->addSuccessor(copy0MBB);
8932     BB->addSuccessor(sinkMBB);
8933 
8934     BuildMI(BB, dl, TII->get(ARM::tBcc))
8935         .addMBB(sinkMBB)
8936         .addImm(MI.getOperand(3).getImm())
8937         .addReg(MI.getOperand(4).getReg());
8938 
8939     //  copy0MBB:
8940     //   %FalseValue = ...
8941     //   # fallthrough to sinkMBB
8942     BB = copy0MBB;
8943 
8944     // Update machine-CFG edges
8945     BB->addSuccessor(sinkMBB);
8946 
8947     //  sinkMBB:
8948     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
8949     //  ...
8950     BB = sinkMBB;
8951     BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
8952         .addReg(MI.getOperand(1).getReg())
8953         .addMBB(copy0MBB)
8954         .addReg(MI.getOperand(2).getReg())
8955         .addMBB(thisMBB);
8956 
8957     MI.eraseFromParent(); // The pseudo instruction is gone now.
8958     return BB;
8959   }
8960 
8961   case ARM::BCCi64:
8962   case ARM::BCCZi64: {
8963     // If there is an unconditional branch to the other successor, remove it.
8964     BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());
8965 
8966     // Compare both parts that make up the double comparison separately for
8967     // equality.
8968     bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;
8969 
8970     unsigned LHS1 = MI.getOperand(1).getReg();
8971     unsigned LHS2 = MI.getOperand(2).getReg();
8972     if (RHSisZero) {
8973       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8974           .addReg(LHS1)
8975           .addImm(0)
8976           .add(predOps(ARMCC::AL));
8977       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8978         .addReg(LHS2).addImm(0)
8979         .addImm(ARMCC::EQ).addReg(ARM::CPSR);
8980     } else {
8981       unsigned RHS1 = MI.getOperand(3).getReg();
8982       unsigned RHS2 = MI.getOperand(4).getReg();
8983       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
8984           .addReg(LHS1)
8985           .addReg(RHS1)
8986           .add(predOps(ARMCC::AL));
8987       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
8988         .addReg(LHS2).addReg(RHS2)
8989         .addImm(ARMCC::EQ).addReg(ARM::CPSR);
8990     }
8991 
8992     MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
8993     MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
8994     if (MI.getOperand(0).getImm() == ARMCC::NE)
8995       std::swap(destMBB, exitMBB);
8996 
8997     BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
8998       .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
8999     if (isThumb2)
9000       BuildMI(BB, dl, TII->get(ARM::t2B))
9001           .addMBB(exitMBB)
9002           .add(predOps(ARMCC::AL));
9003     else
9004       BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB);
9005 
9006     MI.eraseFromParent(); // The pseudo instruction is gone now.
9007     return BB;
9008   }
9009 
9010   case ARM::Int_eh_sjlj_setjmp:
9011   case ARM::Int_eh_sjlj_setjmp_nofp:
9012   case ARM::tInt_eh_sjlj_setjmp:
9013   case ARM::t2Int_eh_sjlj_setjmp:
9014   case ARM::t2Int_eh_sjlj_setjmp_nofp:
9015     return BB;
9016 
9017   case ARM::Int_eh_sjlj_setup_dispatch:
9018     EmitSjLjDispatchBlock(MI, BB);
9019     return BB;
9020 
9021   case ARM::ABS:
9022   case ARM::t2ABS: {
9023     // To insert an ABS instruction, we have to insert the
9024     // diamond control-flow pattern.  The incoming instruction knows the
9025     // source vreg to test against 0, the destination vreg to set,
9026     // the condition code register to branch on, the
9027     // true/false values to select between, and a branch opcode to use.
9028     // It transforms
9029     //     V1 = ABS V0
9030     // into
9031     //     V2 = MOVS V0
9032     //     BCC                      (branch to SinkBB if V0 >= 0)
9033     //     RSBBB: V3 = RSBri V2, 0  (compute ABS if V2 < 0)
9034     //     SinkBB: V1 = PHI(V2, V3)
9035     const BasicBlock *LLVM_BB = BB->getBasicBlock();
9036     MachineFunction::iterator BBI = ++BB->getIterator();
9037     MachineFunction *Fn = BB->getParent();
9038     MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
9039     MachineBasicBlock *SinkBB  = Fn->CreateMachineBasicBlock(LLVM_BB);
9040     Fn->insert(BBI, RSBBB);
9041     Fn->insert(BBI, SinkBB);
9042 
9043     unsigned int ABSSrcReg = MI.getOperand(1).getReg();
9044     unsigned int ABSDstReg = MI.getOperand(0).getReg();
9045     bool ABSSrcKIll = MI.getOperand(1).isKill();
9046     bool isThumb2 = Subtarget->isThumb2();
9047     MachineRegisterInfo &MRI = Fn->getRegInfo();
9048     // In Thumb mode S must not be specified if source register is the SP or
9049     // PC and if destination register is the SP, so restrict register class
9050     unsigned NewRsbDstReg =
9051       MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);
9052 
9053     // Transfer the remainder of BB and its successor edges to sinkMBB.
9054     SinkBB->splice(SinkBB->begin(), BB,
9055                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
9056     SinkBB->transferSuccessorsAndUpdatePHIs(BB);
9057 
9058     BB->addSuccessor(RSBBB);
9059     BB->addSuccessor(SinkBB);
9060 
9061     // fall through to SinkMBB
9062     RSBBB->addSuccessor(SinkBB);
9063 
9064     // insert a cmp at the end of BB
9065     BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
9066         .addReg(ABSSrcReg)
9067         .addImm(0)
9068         .add(predOps(ARMCC::AL));
9069 
9070     // insert a bcc with opposite CC to ARMCC::MI at the end of BB
9071     BuildMI(BB, dl,
9072       TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
9073       .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);
9074 
9075     // insert rsbri in RSBBB
9076     // Note: BCC and rsbri will be converted into predicated rsbmi
9077     // by if-conversion pass
9078     BuildMI(*RSBBB, RSBBB->begin(), dl,
9079             TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
9080         .addReg(ABSSrcReg, ABSSrcKIll ? RegState::Kill : 0)
9081         .addImm(0)
9082         .add(predOps(ARMCC::AL))
9083         .add(condCodeOp());
9084 
9085     // insert PHI in SinkBB,
9086     // reuse ABSDstReg to not change uses of ABS instruction
9087     BuildMI(*SinkBB, SinkBB->begin(), dl,
9088       TII->get(ARM::PHI), ABSDstReg)
9089       .addReg(NewRsbDstReg).addMBB(RSBBB)
9090       .addReg(ABSSrcReg).addMBB(BB);
9091 
9092     // remove ABS instruction
9093     MI.eraseFromParent();
9094 
9095     // return last added BB
9096     return SinkBB;
9097   }
9098   case ARM::COPY_STRUCT_BYVAL_I32:
9099     ++NumLoopByVals;
9100     return EmitStructByval(MI, BB);
9101   case ARM::WIN__CHKSTK:
9102     return EmitLowered__chkstk(MI, BB);
9103   case ARM::WIN__DBZCHK:
9104     return EmitLowered__dbzchk(MI, BB);
9105   }
9106 }
9107 
9108 /// \brief Attaches vregs to MEMCPY that it will use as scratch registers
9109 /// when it is expanded into LDM/STM. This is done as a post-isel lowering
9110 /// instead of as a custom inserter because we need the use list from the SDNode.
9111 static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
9112                                     MachineInstr &MI, const SDNode *Node) {
9113   bool isThumb1 = Subtarget->isThumb1Only();
9114 
9115   DebugLoc DL = MI.getDebugLoc();
9116   MachineFunction *MF = MI.getParent()->getParent();
9117   MachineRegisterInfo &MRI = MF->getRegInfo();
9118   MachineInstrBuilder MIB(*MF, MI);
9119 
9120   // If the new dst/src is unused mark it as dead.
9121   if (!Node->hasAnyUseOfValue(0)) {
9122     MI.getOperand(0).setIsDead(true);
9123   }
9124   if (!Node->hasAnyUseOfValue(1)) {
9125     MI.getOperand(1).setIsDead(true);
9126   }
9127 
9128   // The MEMCPY both defines and kills the scratch registers.
9129   for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
9130     unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
9131                                                          : &ARM::GPRRegClass);
9132     MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
9133   }
9134 }
9135 
9136 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9137                                                       SDNode *Node) const {
9138   if (MI.getOpcode() == ARM::MEMCPY) {
9139     attachMEMCPYScratchRegs(Subtarget, MI, Node);
9140     return;
9141   }
9142 
9143   const MCInstrDesc *MCID = &MI.getDesc();
9144   // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
9145   // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
9146   // operand is still set to noreg. If needed, set the optional operand's
9147   // register to CPSR, and remove the redundant implicit def.
9148   //
9149   // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).
9150 
9151   // Rename pseudo opcodes.
9152   unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
9153   unsigned ccOutIdx;
9154   if (NewOpc) {
9155     const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
9156     MCID = &TII->get(NewOpc);
9157 
9158     assert(MCID->getNumOperands() ==
9159            MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
9160         && "converted opcode should be the same except for cc_out"
9161            " (and, on Thumb1, pred)");
9162 
9163     MI.setDesc(*MCID);
9164 
9165     // Add the optional cc_out operand
9166     MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
9167 
9168     // On Thumb1, move all input operands to the end, then add the predicate
9169     if (Subtarget->isThumb1Only()) {
9170       for (unsigned c = MCID->getNumOperands() - 4; c--;) {
9171         MI.addOperand(MI.getOperand(1));
9172         MI.RemoveOperand(1);
9173       }
9174 
9175       // Restore the ties
9176       for (unsigned i = MI.getNumOperands(); i--;) {
9177         const MachineOperand& op = MI.getOperand(i);
9178         if (op.isReg() && op.isUse()) {
9179           int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
9180           if (DefIdx != -1)
9181             MI.tieOperands(DefIdx, i);
9182         }
9183       }
9184 
9185       MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
9186       MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
9187       ccOutIdx = 1;
9188     } else
9189       ccOutIdx = MCID->getNumOperands() - 1;
9190   } else
9191     ccOutIdx = MCID->getNumOperands() - 1;
9192 
9193   // Any ARM instruction that sets the 's' bit should specify an optional
9194   // "cc_out" operand in the last operand position.
9195   if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
9196     assert(!NewOpc && "Optional cc_out operand required");
9197     return;
9198   }
9199   // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
9200   // since we already have an optional CPSR def.
9201   bool definesCPSR = false;
9202   bool deadCPSR = false;
9203   for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
9204        ++i) {
9205     const MachineOperand &MO = MI.getOperand(i);
9206     if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
9207       definesCPSR = true;
9208       if (MO.isDead())
9209         deadCPSR = true;
9210       MI.RemoveOperand(i);
9211       break;
9212     }
9213   }
9214   if (!definesCPSR) {
9215     assert(!NewOpc && "Optional cc_out operand required");
9216     return;
9217   }
9218   assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
9219   if (deadCPSR) {
9220     assert(!MI.getOperand(ccOutIdx).getReg() &&
9221            "expect uninitialized optional cc_out operand");
9222     // Thumb1 instructions must have the S bit even if the CPSR is dead.
9223     if (!Subtarget->isThumb1Only())
9224       return;
9225   }
9226 
9227   // If this instruction was defined with an optional CPSR def and its dag node
9228   // had a live implicit CPSR def, then activate the optional CPSR def.
9229   MachineOperand &MO = MI.getOperand(ccOutIdx);
9230   MO.setReg(ARM::CPSR);
9231   MO.setIsDef(true);
9232 }
9233 
9234 //===----------------------------------------------------------------------===//
9235 //                           ARM Optimization Hooks
9236 //===----------------------------------------------------------------------===//
9237 
9238 // Helper function that checks if N is a null or all ones constant.
9239 static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
9240   return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
9241 }
9242 
9243 // Return true if N is conditionally 0 or all ones.
9244 // Detects these expressions where cc is an i1 value:
9245 //
9246 //   (select cc 0, y)   [AllOnes=0]
9247 //   (select cc y, 0)   [AllOnes=0]
9248 //   (zext cc)          [AllOnes=0]
9249 //   (sext cc)          [AllOnes=0/1]
9250 //   (select cc -1, y)  [AllOnes=1]
9251 //   (select cc y, -1)  [AllOnes=1]
9252 //
9253 // Invert is set when N is the null/all ones constant when CC is false.
9254 // OtherOp is set to the alternative value of N.
9255 static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
9256                                        SDValue &CC, bool &Invert,
9257                                        SDValue &OtherOp,
9258                                        SelectionDAG &DAG) {
9259   switch (N->getOpcode()) {
9260   default: return false;
9261   case ISD::SELECT: {
9262     CC = N->getOperand(0);
9263     SDValue N1 = N->getOperand(1);
9264     SDValue N2 = N->getOperand(2);
9265     if (isZeroOrAllOnes(N1, AllOnes)) {
9266       Invert = false;
9267       OtherOp = N2;
9268       return true;
9269     }
9270     if (isZeroOrAllOnes(N2, AllOnes)) {
9271       Invert = true;
9272       OtherOp = N1;
9273       return true;
9274     }
9275     return false;
9276   }
9277   case ISD::ZERO_EXTEND:
9278     // (zext cc) can never be the all ones value.
9279     if (AllOnes)
9280       return false;
9281     LLVM_FALLTHROUGH;
9282   case ISD::SIGN_EXTEND: {
9283     SDLoc dl(N);
9284     EVT VT = N->getValueType(0);
9285     CC = N->getOperand(0);
9286     if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC)
9287       return false;
9288     Invert = !AllOnes;
9289     if (AllOnes)
9290       // When looking for an AllOnes constant, N is an sext, and the 'other'
9291       // value is 0.
9292       OtherOp = DAG.getConstant(0, dl, VT);
9293     else if (N->getOpcode() == ISD::ZERO_EXTEND)
9294       // When looking for a 0 constant, N can be zext or sext.
9295       OtherOp = DAG.getConstant(1, dl, VT);
9296     else
9297       OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
9298                                 VT);
9299     return true;
9300   }
9301   }
9302 }
9303 
9304 // Combine a constant select operand into its use:
9305 //
9306 //   (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
9307 //   (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
9308 //   (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
9309 //   (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
9310 //   (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
9311 //
9312 // The transform is rejected if the select doesn't have a constant operand that
9313 // is null, or all ones when AllOnes is set.
9314 //
9315 // Also recognize sext/zext from i1:
9316 //
9317 //   (add (zext cc), x) -> (select cc (add x, 1), x)
9318 //   (add (sext cc), x) -> (select cc (add x, -1), x)
9319 //
9320 // These transformations eventually create predicated instructions.
9321 //
9322 // @param N       The node to transform.
9323 // @param Slct    The N operand that is a select.
9324 // @param OtherOp The other N operand (x above).
9325 // @param DCI     Context.
9326 // @param AllOnes Require the select constant to be all ones instead of null.
9327 // @returns The new node, or SDValue() on failure.
9328 static
9329 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
9330                             TargetLowering::DAGCombinerInfo &DCI,
9331                             bool AllOnes = false) {
9332   SelectionDAG &DAG = DCI.DAG;
9333   EVT VT = N->getValueType(0);
9334   SDValue NonConstantVal;
9335   SDValue CCOp;
9336   bool SwapSelectOps;
9337   if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
9338                                   NonConstantVal, DAG))
9339     return SDValue();
9340 
9341   // Slct is now know to be the desired identity constant when CC is true.
9342   SDValue TrueVal = OtherOp;
9343   SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
9344                                  OtherOp, NonConstantVal);
9345   // Unless SwapSelectOps says CC should be false.
9346   if (SwapSelectOps)
9347     std::swap(TrueVal, FalseVal);
9348 
9349   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
9350                      CCOp, TrueVal, FalseVal);
9351 }
9352 
9353 // Attempt combineSelectAndUse on each operand of a commutative operator N.
9354 static
9355 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
9356                                        TargetLowering::DAGCombinerInfo &DCI) {
9357   SDValue N0 = N->getOperand(0);
9358   SDValue N1 = N->getOperand(1);
9359   if (N0.getNode()->hasOneUse())
9360     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
9361       return Result;
9362   if (N1.getNode()->hasOneUse())
9363     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
9364       return Result;
9365   return SDValue();
9366 }
9367 
9368 static bool IsVUZPShuffleNode(SDNode *N) {
9369   // VUZP shuffle node.
9370   if (N->getOpcode() == ARMISD::VUZP)
9371     return true;
9372 
9373   // "VUZP" on i32 is an alias for VTRN.
9374   if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
9375     return true;
9376 
9377   return false;
9378 }
9379 
9380 static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
9381                                  TargetLowering::DAGCombinerInfo &DCI,
9382                                  const ARMSubtarget *Subtarget) {
9383   // Look for ADD(VUZP.0, VUZP.1).
9384   if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
9385       N0 == N1)
9386    return SDValue();
9387 
9388   // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
9389   if (!N->getValueType(0).is64BitVector())
9390     return SDValue();
9391 
9392   // Generate vpadd.
9393   SelectionDAG &DAG = DCI.DAG;
9394   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9395   SDLoc dl(N);
9396   SDNode *Unzip = N0.getNode();
9397   EVT VT = N->getValueType(0);
9398 
9399   SmallVector<SDValue, 8> Ops;
9400   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
9401                                 TLI.getPointerTy(DAG.getDataLayout())));
9402   Ops.push_back(Unzip->getOperand(0));
9403   Ops.push_back(Unzip->getOperand(1));
9404 
9405   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
9406 }
9407 
9408 static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
9409                                       TargetLowering::DAGCombinerInfo &DCI,
9410                                       const ARMSubtarget *Subtarget) {
9411   // Check for two extended operands.
9412   if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
9413         N1.getOpcode() == ISD::SIGN_EXTEND) &&
9414       !(N0.getOpcode() == ISD::ZERO_EXTEND &&
9415         N1.getOpcode() == ISD::ZERO_EXTEND))
9416     return SDValue();
9417 
9418   SDValue N00 = N0.getOperand(0);
9419   SDValue N10 = N1.getOperand(0);
9420 
9421   // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1))
9422   if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
9423       N00 == N10)
9424     return SDValue();
9425 
9426   // We only recognize Q register paddl here; this can't be reached until
9427   // after type legalization.
9428   if (!N00.getValueType().is64BitVector() ||
9429       !N0.getValueType().is128BitVector())
9430     return SDValue();
9431 
9432   // Generate vpaddl.
9433   SelectionDAG &DAG = DCI.DAG;
9434   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9435   SDLoc dl(N);
9436   EVT VT = N->getValueType(0);
9437 
9438   SmallVector<SDValue, 8> Ops;
9439   // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
9440   unsigned Opcode;
9441   if (N0.getOpcode() == ISD::SIGN_EXTEND)
9442     Opcode = Intrinsic::arm_neon_vpaddls;
9443   else
9444     Opcode = Intrinsic::arm_neon_vpaddlu;
9445   Ops.push_back(DAG.getConstant(Opcode, dl,
9446                                 TLI.getPointerTy(DAG.getDataLayout())));
9447   EVT ElemTy = N00.getValueType().getVectorElementType();
9448   unsigned NumElts = VT.getVectorNumElements();
9449   EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
9450   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
9451                                N00.getOperand(0), N00.getOperand(1));
9452   Ops.push_back(Concat);
9453 
9454   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
9455 }
9456 
9457 // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
9458 // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
9459 // much easier to match.
9460 static SDValue
9461 AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
9462                                TargetLowering::DAGCombinerInfo &DCI,
9463                                const ARMSubtarget *Subtarget) {
9464   // Only perform optimization if after legalize, and if NEON is available. We
9465   // also expected both operands to be BUILD_VECTORs.
9466   if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
9467       || N0.getOpcode() != ISD::BUILD_VECTOR
9468       || N1.getOpcode() != ISD::BUILD_VECTOR)
9469     return SDValue();
9470 
9471   // Check output type since VPADDL operand elements can only be 8, 16, or 32.
9472   EVT VT = N->getValueType(0);
9473   if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
9474     return SDValue();
9475 
9476   // Check that the vector operands are of the right form.
9477   // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
9478   // operands, where N is the size of the formed vector.
9479   // Each EXTRACT_VECTOR should have the same input vector and odd or even
9480   // index such that we have a pair wise add pattern.
9481 
9482   // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
9483   if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9484     return SDValue();
9485   SDValue Vec = N0->getOperand(0)->getOperand(0);
9486   SDNode *V = Vec.getNode();
9487   unsigned nextIndex = 0;
9488 
9489   // For each operands to the ADD which are BUILD_VECTORs,
9490   // check to see if each of their operands are an EXTRACT_VECTOR with
9491   // the same vector and appropriate index.
9492   for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
9493     if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
9494         && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
9495 
9496       SDValue ExtVec0 = N0->getOperand(i);
9497       SDValue ExtVec1 = N1->getOperand(i);
9498 
9499       // First operand is the vector, verify its the same.
9500       if (V != ExtVec0->getOperand(0).getNode() ||
9501           V != ExtVec1->getOperand(0).getNode())
9502         return SDValue();
9503 
9504       // Second is the constant, verify its correct.
9505       ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
9506       ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
9507 
9508       // For the constant, we want to see all the even or all the odd.
9509       if (!C0 || !C1 || C0->getZExtValue() != nextIndex
9510           || C1->getZExtValue() != nextIndex+1)
9511         return SDValue();
9512 
9513       // Increment index.
9514       nextIndex+=2;
9515     } else
9516       return SDValue();
9517   }
9518 
9519   // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure
9520   // we're using the entire input vector, otherwise there's a size/legality
9521   // mismatch somewhere.
9522   if (nextIndex != Vec.getValueType().getVectorNumElements() ||
9523       Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
9524     return SDValue();
9525 
9526   // Create VPADDL node.
9527   SelectionDAG &DAG = DCI.DAG;
9528   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9529 
9530   SDLoc dl(N);
9531 
9532   // Build operand list.
9533   SmallVector<SDValue, 8> Ops;
9534   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
9535                                 TLI.getPointerTy(DAG.getDataLayout())));
9536 
9537   // Input is the vector.
9538   Ops.push_back(Vec);
9539 
9540   // Get widened type and narrowed type.
9541   MVT widenType;
9542   unsigned numElem = VT.getVectorNumElements();
9543 
9544   EVT inputLaneType = Vec.getValueType().getVectorElementType();
9545   switch (inputLaneType.getSimpleVT().SimpleTy) {
9546     case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
9547     case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
9548     case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
9549     default:
9550       llvm_unreachable("Invalid vector element type for padd optimization.");
9551   }
9552 
9553   SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
9554   unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
9555   return DAG.getNode(ExtOp, dl, VT, tmp);
9556 }
9557 
9558 static SDValue findMUL_LOHI(SDValue V) {
9559   if (V->getOpcode() == ISD::UMUL_LOHI ||
9560       V->getOpcode() == ISD::SMUL_LOHI)
9561     return V;
9562   return SDValue();
9563 }
9564 
9565 static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
9566                                         TargetLowering::DAGCombinerInfo &DCI,
9567                                         const ARMSubtarget *Subtarget) {
9568 
9569   if (Subtarget->isThumb()) {
9570     if (!Subtarget->hasDSP())
9571       return SDValue();
9572   } else if (!Subtarget->hasV5TEOps())
9573     return SDValue();
9574 
9575   // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
9576   // accumulates the product into a 64-bit value. The 16-bit values will
9577   // be sign extended somehow or SRA'd into 32-bit values
9578   // (addc (adde (mul 16bit, 16bit), lo), hi)
9579   SDValue Mul = AddcNode->getOperand(0);
9580   SDValue Lo = AddcNode->getOperand(1);
9581   if (Mul.getOpcode() != ISD::MUL) {
9582     Lo = AddcNode->getOperand(0);
9583     Mul = AddcNode->getOperand(1);
9584     if (Mul.getOpcode() != ISD::MUL)
9585       return SDValue();
9586   }
9587 
9588   SDValue SRA = AddeNode->getOperand(0);
9589   SDValue Hi = AddeNode->getOperand(1);
9590   if (SRA.getOpcode() != ISD::SRA) {
9591     SRA = AddeNode->getOperand(1);
9592     Hi = AddeNode->getOperand(0);
9593     if (SRA.getOpcode() != ISD::SRA)
9594       return SDValue();
9595   }
9596   if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
9597     if (Const->getZExtValue() != 31)
9598       return SDValue();
9599   } else
9600     return SDValue();
9601 
9602   if (SRA.getOperand(0) != Mul)
9603     return SDValue();
9604 
9605   SelectionDAG &DAG = DCI.DAG;
9606   SDLoc dl(AddcNode);
9607   unsigned Opcode = 0;
9608   SDValue Op0;
9609   SDValue Op1;
9610 
9611   if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) {
9612     Opcode = ARMISD::SMLALBB;
9613     Op0 = Mul.getOperand(0);
9614     Op1 = Mul.getOperand(1);
9615   } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) {
9616     Opcode = ARMISD::SMLALBT;
9617     Op0 = Mul.getOperand(0);
9618     Op1 = Mul.getOperand(1).getOperand(0);
9619   } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) {
9620     Opcode = ARMISD::SMLALTB;
9621     Op0 = Mul.getOperand(0).getOperand(0);
9622     Op1 = Mul.getOperand(1);
9623   } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) {
9624     Opcode = ARMISD::SMLALTT;
9625     Op0 = Mul->getOperand(0).getOperand(0);
9626     Op1 = Mul->getOperand(1).getOperand(0);
9627   }
9628 
9629   if (!Op0 || !Op1)
9630     return SDValue();
9631 
9632   SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
9633                               Op0, Op1, Lo, Hi);
9634   // Replace the ADDs' nodes uses by the MLA node's values.
9635   SDValue HiMLALResult(SMLAL.getNode(), 1);
9636   SDValue LoMLALResult(SMLAL.getNode(), 0);
9637 
9638   DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
9639   DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);
9640 
9641   // Return original node to notify the driver to stop replacing.
9642   SDValue resNode(AddcNode, 0);
9643   return resNode;
9644 }
9645 
9646 static SDValue AddCombineTo64bitMLAL(SDNode *AddeNode,
9647                                      TargetLowering::DAGCombinerInfo &DCI,
9648                                      const ARMSubtarget *Subtarget) {
9649   // Look for multiply add opportunities.
9650   // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
9651   // each add nodes consumes a value from ISD::UMUL_LOHI and there is
9652   // a glue link from the first add to the second add.
9653   // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
9654   // a S/UMLAL instruction.
9655   //                  UMUL_LOHI
9656   //                 / :lo    \ :hi
9657   //                /          \          [no multiline comment]
9658   //    loAdd ->  ADDE         |
9659   //                 \ :glue  /
9660   //                  \      /
9661   //                    ADDC   <- hiAdd
9662   //
9663   assert(AddeNode->getOpcode() == ARMISD::ADDE && "Expect an ADDE");
9664 
9665   assert(AddeNode->getNumOperands() == 3 &&
9666          AddeNode->getOperand(2).getValueType() == MVT::i32 &&
9667          "ADDE node has the wrong inputs");
9668 
9669   // Check that we have a glued ADDC node.
9670   SDNode* AddcNode = AddeNode->getOperand(2).getNode();
9671   if (AddcNode->getOpcode() != ARMISD::ADDC)
9672     return SDValue();
9673 
9674   SDValue AddcOp0 = AddcNode->getOperand(0);
9675   SDValue AddcOp1 = AddcNode->getOperand(1);
9676 
9677   // Check if the two operands are from the same mul_lohi node.
9678   if (AddcOp0.getNode() == AddcOp1.getNode())
9679     return SDValue();
9680 
9681   assert(AddcNode->getNumValues() == 2 &&
9682          AddcNode->getValueType(0) == MVT::i32 &&
9683          "Expect ADDC with two result values. First: i32");
9684 
9685   // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
9686   // maybe a SMLAL which multiplies two 16-bit values.
9687   if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
9688       AddcOp0->getOpcode() != ISD::SMUL_LOHI &&
9689       AddcOp1->getOpcode() != ISD::UMUL_LOHI &&
9690       AddcOp1->getOpcode() != ISD::SMUL_LOHI)
9691     return AddCombineTo64BitSMLAL16(AddcNode, AddeNode, DCI, Subtarget);
9692 
9693   // Check for the triangle shape.
9694   SDValue AddeOp0 = AddeNode->getOperand(0);
9695   SDValue AddeOp1 = AddeNode->getOperand(1);
9696 
9697   // Make sure that the ADDE operands are not coming from the same node.
9698   if (AddeOp0.getNode() == AddeOp1.getNode())
9699     return SDValue();
9700 
9701   // Find the MUL_LOHI node walking up ADDE's operands.
9702   bool IsLeftOperandMUL = false;
9703   SDValue MULOp = findMUL_LOHI(AddeOp0);
9704   if (MULOp == SDValue())
9705    MULOp = findMUL_LOHI(AddeOp1);
9706   else
9707     IsLeftOperandMUL = true;
9708   if (MULOp == SDValue())
9709     return SDValue();
9710 
9711   // Figure out the right opcode.
9712   unsigned Opc = MULOp->getOpcode();
9713   unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;
9714 
9715   // Figure out the high and low input values to the MLAL node.
9716   SDValue* HiAdd = nullptr;
9717   SDValue* LoMul = nullptr;
9718   SDValue* LowAdd = nullptr;
9719 
9720   // Ensure that ADDE is from high result of ISD::SMUL_LOHI.
9721   if ((AddeOp0 != MULOp.getValue(1)) && (AddeOp1 != MULOp.getValue(1)))
9722     return SDValue();
9723 
9724   if (IsLeftOperandMUL)
9725     HiAdd = &AddeOp1;
9726   else
9727     HiAdd = &AddeOp0;
9728 
9729 
9730   // Ensure that LoMul and LowAdd are taken from correct ISD::SMUL_LOHI node
9731   // whose low result is fed to the ADDC we are checking.
9732 
9733   if (AddcOp0 == MULOp.getValue(0)) {
9734     LoMul = &AddcOp0;
9735     LowAdd = &AddcOp1;
9736   }
9737   if (AddcOp1 == MULOp.getValue(0)) {
9738     LoMul = &AddcOp1;
9739     LowAdd = &AddcOp0;
9740   }
9741 
9742   if (!LoMul)
9743     return SDValue();
9744 
9745   // Create the merged node.
9746   SelectionDAG &DAG = DCI.DAG;
9747 
9748   // Build operand list.
9749   SmallVector<SDValue, 8> Ops;
9750   Ops.push_back(LoMul->getOperand(0));
9751   Ops.push_back(LoMul->getOperand(1));
9752   Ops.push_back(*LowAdd);
9753   Ops.push_back(*HiAdd);
9754 
9755   SDValue MLALNode =  DAG.getNode(FinalOpc, SDLoc(AddcNode),
9756                                  DAG.getVTList(MVT::i32, MVT::i32), Ops);
9757 
9758   // Replace the ADDs' nodes uses by the MLA node's values.
9759   SDValue HiMLALResult(MLALNode.getNode(), 1);
9760   DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);
9761 
9762   SDValue LoMLALResult(MLALNode.getNode(), 0);
9763   DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
9764 
9765   // Return original node to notify the driver to stop replacing.
9766   return SDValue(AddeNode, 0);
9767 }
9768 
9769 static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
9770                                       TargetLowering::DAGCombinerInfo &DCI,
9771                                       const ARMSubtarget *Subtarget) {
9772   // UMAAL is similar to UMLAL except that it adds two unsigned values.
9773   // While trying to combine for the other MLAL nodes, first search for the
9774   // chance to use UMAAL. Check if Addc uses a node which has already
9775   // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
9776   // as the addend, and it's handled in PerformUMLALCombine.
9777 
9778   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
9779     return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
9780 
9781   // Check that we have a glued ADDC node.
9782   SDNode* AddcNode = AddeNode->getOperand(2).getNode();
9783   if (AddcNode->getOpcode() != ARMISD::ADDC)
9784     return SDValue();
9785 
9786   // Find the converted UMAAL or quit if it doesn't exist.
9787   SDNode *UmlalNode = nullptr;
9788   SDValue AddHi;
9789   if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
9790     UmlalNode = AddcNode->getOperand(0).getNode();
9791     AddHi = AddcNode->getOperand(1);
9792   } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
9793     UmlalNode = AddcNode->getOperand(1).getNode();
9794     AddHi = AddcNode->getOperand(0);
9795   } else {
9796     return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
9797   }
9798 
9799   // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
9800   // the ADDC as well as Zero.
9801   if (!isNullConstant(UmlalNode->getOperand(3)))
9802     return SDValue();
9803 
9804   if ((isNullConstant(AddeNode->getOperand(0)) &&
9805        AddeNode->getOperand(1).getNode() == UmlalNode) ||
9806       (AddeNode->getOperand(0).getNode() == UmlalNode &&
9807        isNullConstant(AddeNode->getOperand(1)))) {
9808 
9809     SelectionDAG &DAG = DCI.DAG;
9810     SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
9811                       UmlalNode->getOperand(2), AddHi };
9812     SDValue UMAAL =  DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
9813                                  DAG.getVTList(MVT::i32, MVT::i32), Ops);
9814 
9815     // Replace the ADDs' nodes uses by the UMAAL node's values.
9816     DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
9817     DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));
9818 
9819     // Return original node to notify the driver to stop replacing.
9820     return SDValue(AddeNode, 0);
9821   }
9822   return SDValue();
9823 }
9824 
9825 static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
9826                                    const ARMSubtarget *Subtarget) {
9827   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
9828     return SDValue();
9829 
9830   // Check that we have a pair of ADDC and ADDE as operands.
9831   // Both addends of the ADDE must be zero.
9832   SDNode* AddcNode = N->getOperand(2).getNode();
9833   SDNode* AddeNode = N->getOperand(3).getNode();
9834   if ((AddcNode->getOpcode() == ARMISD::ADDC) &&
9835       (AddeNode->getOpcode() == ARMISD::ADDE) &&
9836       isNullConstant(AddeNode->getOperand(0)) &&
9837       isNullConstant(AddeNode->getOperand(1)) &&
9838       (AddeNode->getOperand(2).getNode() == AddcNode))
9839     return DAG.getNode(ARMISD::UMAAL, SDLoc(N),
9840                        DAG.getVTList(MVT::i32, MVT::i32),
9841                        {N->getOperand(0), N->getOperand(1),
9842                         AddcNode->getOperand(0), AddcNode->getOperand(1)});
9843   else
9844     return SDValue();
9845 }
9846 
9847 static SDValue PerformAddcSubcCombine(SDNode *N, SelectionDAG &DAG,
9848                                       const ARMSubtarget *Subtarget) {
9849   if (Subtarget->isThumb1Only()) {
9850     SDValue RHS = N->getOperand(1);
9851     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
9852       int32_t imm = C->getSExtValue();
9853       if (imm < 0 && imm > INT_MIN) {
9854         SDLoc DL(N);
9855         RHS = DAG.getConstant(-imm, DL, MVT::i32);
9856         unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
9857                                                            : ARMISD::ADDC;
9858         return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
9859       }
9860     }
9861   }
9862   return SDValue();
9863 }
9864 
9865 static SDValue PerformAddeSubeCombine(SDNode *N, SelectionDAG &DAG,
9866                                       const ARMSubtarget *Subtarget) {
9867   if (Subtarget->isThumb1Only()) {
9868     SDValue RHS = N->getOperand(1);
9869     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
9870       int64_t imm = C->getSExtValue();
9871       if (imm < 0) {
9872         SDLoc DL(N);
9873 
9874         // The with-carry-in form matches bitwise not instead of the negation.
9875         // Effectively, the inverse interpretation of the carry flag already
9876         // accounts for part of the negation.
9877         RHS = DAG.getConstant(~imm, DL, MVT::i32);
9878 
9879         unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
9880                                                            : ARMISD::ADDE;
9881         return DAG.getNode(Opcode, DL, N->getVTList(),
9882                            N->getOperand(0), RHS, N->getOperand(2));
9883       }
9884     }
9885   }
9886   return SDValue();
9887 }
9888 
9889 /// PerformADDECombine - Target-specific dag combine transform from
9890 /// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or
9891 /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL
9892 static SDValue PerformADDECombine(SDNode *N,
9893                                   TargetLowering::DAGCombinerInfo &DCI,
9894                                   const ARMSubtarget *Subtarget) {
9895   // Only ARM and Thumb2 support UMLAL/SMLAL.
9896   if (Subtarget->isThumb1Only())
9897     return PerformAddeSubeCombine(N, DCI.DAG, Subtarget);
9898 
9899   // Only perform the checks after legalize when the pattern is available.
9900   if (DCI.isBeforeLegalize()) return SDValue();
9901 
9902   return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
9903 }
9904 
9905 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
9906 /// operands N0 and N1.  This is a helper for PerformADDCombine that is
9907 /// called with the default operands, and if that fails, with commuted
9908 /// operands.
9909 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
9910                                           TargetLowering::DAGCombinerInfo &DCI,
9911                                           const ARMSubtarget *Subtarget){
9912   // Attempt to create vpadd for this add.
9913   if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
9914     return Result;
9915 
9916   // Attempt to create vpaddl for this add.
9917   if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
9918     return Result;
9919   if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
9920                                                       Subtarget))
9921     return Result;
9922 
9923   // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
9924   if (N0.getNode()->hasOneUse())
9925     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
9926       return Result;
9927   return SDValue();
9928 }
9929 
9930 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
9931 ///
9932 static SDValue PerformADDCombine(SDNode *N,
9933                                  TargetLowering::DAGCombinerInfo &DCI,
9934                                  const ARMSubtarget *Subtarget) {
9935   SDValue N0 = N->getOperand(0);
9936   SDValue N1 = N->getOperand(1);
9937 
9938   // First try with the default operand order.
9939   if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
9940     return Result;
9941 
9942   // If that didn't work, try again with the operands commuted.
9943   return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
9944 }
9945 
9946 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
9947 ///
9948 static SDValue PerformSUBCombine(SDNode *N,
9949                                  TargetLowering::DAGCombinerInfo &DCI) {
9950   SDValue N0 = N->getOperand(0);
9951   SDValue N1 = N->getOperand(1);
9952 
9953   // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
9954   if (N1.getNode()->hasOneUse())
9955     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
9956       return Result;
9957 
9958   return SDValue();
9959 }
9960 
9961 /// PerformVMULCombine
9962 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
9963 /// special multiplier accumulator forwarding.
9964 ///   vmul d3, d0, d2
9965 ///   vmla d3, d1, d2
9966 /// is faster than
9967 ///   vadd d3, d0, d1
9968 ///   vmul d3, d3, d2
9969 //  However, for (A + B) * (A + B),
9970 //    vadd d2, d0, d1
9971 //    vmul d3, d0, d2
9972 //    vmla d3, d1, d2
9973 //  is slower than
9974 //    vadd d2, d0, d1
9975 //    vmul d3, d2, d2
9976 static SDValue PerformVMULCombine(SDNode *N,
9977                                   TargetLowering::DAGCombinerInfo &DCI,
9978                                   const ARMSubtarget *Subtarget) {
9979   if (!Subtarget->hasVMLxForwarding())
9980     return SDValue();
9981 
9982   SelectionDAG &DAG = DCI.DAG;
9983   SDValue N0 = N->getOperand(0);
9984   SDValue N1 = N->getOperand(1);
9985   unsigned Opcode = N0.getOpcode();
9986   if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
9987       Opcode != ISD::FADD && Opcode != ISD::FSUB) {
9988     Opcode = N1.getOpcode();
9989     if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
9990         Opcode != ISD::FADD && Opcode != ISD::FSUB)
9991       return SDValue();
9992     std::swap(N0, N1);
9993   }
9994 
9995   if (N0 == N1)
9996     return SDValue();
9997 
9998   EVT VT = N->getValueType(0);
9999   SDLoc DL(N);
10000   SDValue N00 = N0->getOperand(0);
10001   SDValue N01 = N0->getOperand(1);
10002   return DAG.getNode(Opcode, DL, VT,
10003                      DAG.getNode(ISD::MUL, DL, VT, N00, N1),
10004                      DAG.getNode(ISD::MUL, DL, VT, N01, N1));
10005 }
10006 
10007 static SDValue PerformMULCombine(SDNode *N,
10008                                  TargetLowering::DAGCombinerInfo &DCI,
10009                                  const ARMSubtarget *Subtarget) {
10010   SelectionDAG &DAG = DCI.DAG;
10011 
10012   if (Subtarget->isThumb1Only())
10013     return SDValue();
10014 
10015   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
10016     return SDValue();
10017 
10018   EVT VT = N->getValueType(0);
10019   if (VT.is64BitVector() || VT.is128BitVector())
10020     return PerformVMULCombine(N, DCI, Subtarget);
10021   if (VT != MVT::i32)
10022     return SDValue();
10023 
10024   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
10025   if (!C)
10026     return SDValue();
10027 
10028   int64_t MulAmt = C->getSExtValue();
10029   unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
10030 
10031   ShiftAmt = ShiftAmt & (32 - 1);
10032   SDValue V = N->getOperand(0);
10033   SDLoc DL(N);
10034 
10035   SDValue Res;
10036   MulAmt >>= ShiftAmt;
10037 
10038   if (MulAmt >= 0) {
10039     if (isPowerOf2_32(MulAmt - 1)) {
10040       // (mul x, 2^N + 1) => (add (shl x, N), x)
10041       Res = DAG.getNode(ISD::ADD, DL, VT,
10042                         V,
10043                         DAG.getNode(ISD::SHL, DL, VT,
10044                                     V,
10045                                     DAG.getConstant(Log2_32(MulAmt - 1), DL,
10046                                                     MVT::i32)));
10047     } else if (isPowerOf2_32(MulAmt + 1)) {
10048       // (mul x, 2^N - 1) => (sub (shl x, N), x)
10049       Res = DAG.getNode(ISD::SUB, DL, VT,
10050                         DAG.getNode(ISD::SHL, DL, VT,
10051                                     V,
10052                                     DAG.getConstant(Log2_32(MulAmt + 1), DL,
10053                                                     MVT::i32)),
10054                         V);
10055     } else
10056       return SDValue();
10057   } else {
10058     uint64_t MulAmtAbs = -MulAmt;
10059     if (isPowerOf2_32(MulAmtAbs + 1)) {
10060       // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
10061       Res = DAG.getNode(ISD::SUB, DL, VT,
10062                         V,
10063                         DAG.getNode(ISD::SHL, DL, VT,
10064                                     V,
10065                                     DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
10066                                                     MVT::i32)));
10067     } else if (isPowerOf2_32(MulAmtAbs - 1)) {
10068       // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
10069       Res = DAG.getNode(ISD::ADD, DL, VT,
10070                         V,
10071                         DAG.getNode(ISD::SHL, DL, VT,
10072                                     V,
10073                                     DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
10074                                                     MVT::i32)));
10075       Res = DAG.getNode(ISD::SUB, DL, VT,
10076                         DAG.getConstant(0, DL, MVT::i32), Res);
10077 
10078     } else
10079       return SDValue();
10080   }
10081 
10082   if (ShiftAmt != 0)
10083     Res = DAG.getNode(ISD::SHL, DL, VT,
10084                       Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));
10085 
10086   // Do not add new nodes to DAG combiner worklist.
10087   DCI.CombineTo(N, Res, false);
10088   return SDValue();
10089 }
10090 
10091 static SDValue PerformANDCombine(SDNode *N,
10092                                  TargetLowering::DAGCombinerInfo &DCI,
10093                                  const ARMSubtarget *Subtarget) {
10094   // Attempt to use immediate-form VBIC
10095   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
10096   SDLoc dl(N);
10097   EVT VT = N->getValueType(0);
10098   SelectionDAG &DAG = DCI.DAG;
10099 
10100   if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
10101     return SDValue();
10102 
10103   APInt SplatBits, SplatUndef;
10104   unsigned SplatBitSize;
10105   bool HasAnyUndefs;
10106   if (BVN &&
10107       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
10108     if (SplatBitSize <= 64) {
10109       EVT VbicVT;
10110       SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
10111                                       SplatUndef.getZExtValue(), SplatBitSize,
10112                                       DAG, dl, VbicVT, VT.is128BitVector(),
10113                                       OtherModImm);
10114       if (Val.getNode()) {
10115         SDValue Input =
10116           DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
10117         SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
10118         return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
10119       }
10120     }
10121   }
10122 
10123   if (!Subtarget->isThumb1Only()) {
10124     // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
10125     if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
10126       return Result;
10127   }
10128 
10129   return SDValue();
10130 }
10131 
10132 // Try combining OR nodes to SMULWB, SMULWT.
10133 static SDValue PerformORCombineToSMULWBT(SDNode *OR,
10134                                          TargetLowering::DAGCombinerInfo &DCI,
10135                                          const ARMSubtarget *Subtarget) {
10136   if (!Subtarget->hasV6Ops() ||
10137       (Subtarget->isThumb() &&
10138        (!Subtarget->hasThumb2() || !Subtarget->hasDSP())))
10139     return SDValue();
10140 
10141   SDValue SRL = OR->getOperand(0);
10142   SDValue SHL = OR->getOperand(1);
10143 
10144   if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) {
10145     SRL = OR->getOperand(1);
10146     SHL = OR->getOperand(0);
10147   }
10148   if (!isSRL16(SRL) || !isSHL16(SHL))
10149     return SDValue();
10150 
10151   // The first operands to the shifts need to be the two results from the
10152   // same smul_lohi node.
10153   if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
10154        SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
10155     return SDValue();
10156 
10157   SDNode *SMULLOHI = SRL.getOperand(0).getNode();
10158   if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
10159       SHL.getOperand(0) != SDValue(SMULLOHI, 1))
10160     return SDValue();
10161 
10162   // Now we have:
10163   // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16)))
10164   // For SMUL[B|T] smul_lohi will take a 32-bit and a 16-bit arguments.
10165   // For SMUWB the 16-bit value will signed extended somehow.
10166   // For SMULWT only the SRA is required.
10167   // Check both sides of SMUL_LOHI
10168   SDValue OpS16 = SMULLOHI->getOperand(0);
10169   SDValue OpS32 = SMULLOHI->getOperand(1);
10170 
10171   SelectionDAG &DAG = DCI.DAG;
10172   if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
10173     OpS16 = OpS32;
10174     OpS32 = SMULLOHI->getOperand(0);
10175   }
10176 
10177   SDLoc dl(OR);
10178   unsigned Opcode = 0;
10179   if (isS16(OpS16, DAG))
10180     Opcode = ARMISD::SMULWB;
10181   else if (isSRA16(OpS16)) {
10182     Opcode = ARMISD::SMULWT;
10183     OpS16 = OpS16->getOperand(0);
10184   }
10185   else
10186     return SDValue();
10187 
10188   SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
10189   DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
10190   return SDValue(OR, 0);
10191 }
10192 
10193 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR
10194 static SDValue PerformORCombine(SDNode *N,
10195                                 TargetLowering::DAGCombinerInfo &DCI,
10196                                 const ARMSubtarget *Subtarget) {
10197   // Attempt to use immediate-form VORR
10198   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
10199   SDLoc dl(N);
10200   EVT VT = N->getValueType(0);
10201   SelectionDAG &DAG = DCI.DAG;
10202 
10203   if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
10204     return SDValue();
10205 
10206   APInt SplatBits, SplatUndef;
10207   unsigned SplatBitSize;
10208   bool HasAnyUndefs;
10209   if (BVN && Subtarget->hasNEON() &&
10210       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
10211     if (SplatBitSize <= 64) {
10212       EVT VorrVT;
10213       SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
10214                                       SplatUndef.getZExtValue(), SplatBitSize,
10215                                       DAG, dl, VorrVT, VT.is128BitVector(),
10216                                       OtherModImm);
10217       if (Val.getNode()) {
10218         SDValue Input =
10219           DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
10220         SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
10221         return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
10222       }
10223     }
10224   }
10225 
10226   if (!Subtarget->isThumb1Only()) {
10227     // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
10228     if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
10229       return Result;
10230     if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget))
10231       return Result;
10232   }
10233 
10234   // The code below optimizes (or (and X, Y), Z).
10235   // The AND operand needs to have a single user to make these optimizations
10236   // profitable.
10237   SDValue N0 = N->getOperand(0);
10238   if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
10239     return SDValue();
10240   SDValue N1 = N->getOperand(1);
10241 
10242   // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
10243   if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
10244       DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
10245     APInt SplatUndef;
10246     unsigned SplatBitSize;
10247     bool HasAnyUndefs;
10248 
10249     APInt SplatBits0, SplatBits1;
10250     BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
10251     BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
10252     // Ensure that the second operand of both ands are constants
10253     if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
10254                                       HasAnyUndefs) && !HasAnyUndefs) {
10255         if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
10256                                           HasAnyUndefs) && !HasAnyUndefs) {
10257             // Ensure that the bit width of the constants are the same and that
10258             // the splat arguments are logical inverses as per the pattern we
10259             // are trying to simplify.
10260             if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
10261                 SplatBits0 == ~SplatBits1) {
10262                 // Canonicalize the vector type to make instruction selection
10263                 // simpler.
10264                 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
10265                 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
10266                                              N0->getOperand(1),
10267                                              N0->getOperand(0),
10268                                              N1->getOperand(0));
10269                 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
10270             }
10271         }
10272     }
10273   }
10274 
10275   // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
10276   // reasonable.
10277 
10278   // BFI is only available on V6T2+
10279   if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
10280     return SDValue();
10281 
10282   SDLoc DL(N);
10283   // 1) or (and A, mask), val => ARMbfi A, val, mask
10284   //      iff (val & mask) == val
10285   //
10286   // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
10287   //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
10288   //          && mask == ~mask2
10289   //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
10290   //          && ~mask == mask2
10291   //  (i.e., copy a bitfield value into another bitfield of the same width)
10292 
10293   if (VT != MVT::i32)
10294     return SDValue();
10295 
10296   SDValue N00 = N0.getOperand(0);
10297 
10298   // The value and the mask need to be constants so we can verify this is
10299   // actually a bitfield set. If the mask is 0xffff, we can do better
10300   // via a movt instruction, so don't use BFI in that case.
10301   SDValue MaskOp = N0.getOperand(1);
10302   ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
10303   if (!MaskC)
10304     return SDValue();
10305   unsigned Mask = MaskC->getZExtValue();
10306   if (Mask == 0xffff)
10307     return SDValue();
10308   SDValue Res;
10309   // Case (1): or (and A, mask), val => ARMbfi A, val, mask
10310   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
10311   if (N1C) {
10312     unsigned Val = N1C->getZExtValue();
10313     if ((Val & ~Mask) != Val)
10314       return SDValue();
10315 
10316     if (ARM::isBitFieldInvertedMask(Mask)) {
10317       Val >>= countTrailingZeros(~Mask);
10318 
10319       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
10320                         DAG.getConstant(Val, DL, MVT::i32),
10321                         DAG.getConstant(Mask, DL, MVT::i32));
10322 
10323       // Do not add new nodes to DAG combiner worklist.
10324       DCI.CombineTo(N, Res, false);
10325       return SDValue();
10326     }
10327   } else if (N1.getOpcode() == ISD::AND) {
10328     // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
10329     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
10330     if (!N11C)
10331       return SDValue();
10332     unsigned Mask2 = N11C->getZExtValue();
10333 
10334     // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
10335     // as is to match.
10336     if (ARM::isBitFieldInvertedMask(Mask) &&
10337         (Mask == ~Mask2)) {
10338       // The pack halfword instruction works better for masks that fit it,
10339       // so use that when it's available.
10340       if (Subtarget->hasDSP() &&
10341           (Mask == 0xffff || Mask == 0xffff0000))
10342         return SDValue();
10343       // 2a
10344       unsigned amt = countTrailingZeros(Mask2);
10345       Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
10346                         DAG.getConstant(amt, DL, MVT::i32));
10347       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
10348                         DAG.getConstant(Mask, DL, MVT::i32));
10349       // Do not add new nodes to DAG combiner worklist.
10350       DCI.CombineTo(N, Res, false);
10351       return SDValue();
10352     } else if (ARM::isBitFieldInvertedMask(~Mask) &&
10353                (~Mask == Mask2)) {
10354       // The pack halfword instruction works better for masks that fit it,
10355       // so use that when it's available.
10356       if (Subtarget->hasDSP() &&
10357           (Mask2 == 0xffff || Mask2 == 0xffff0000))
10358         return SDValue();
10359       // 2b
10360       unsigned lsb = countTrailingZeros(Mask);
10361       Res = DAG.getNode(ISD::SRL, DL, VT, N00,
10362                         DAG.getConstant(lsb, DL, MVT::i32));
10363       Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
10364                         DAG.getConstant(Mask2, DL, MVT::i32));
10365       // Do not add new nodes to DAG combiner worklist.
10366       DCI.CombineTo(N, Res, false);
10367       return SDValue();
10368     }
10369   }
10370 
10371   if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
10372       N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
10373       ARM::isBitFieldInvertedMask(~Mask)) {
10374     // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
10375     // where lsb(mask) == #shamt and masked bits of B are known zero.
10376     SDValue ShAmt = N00.getOperand(1);
10377     unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
10378     unsigned LSB = countTrailingZeros(Mask);
10379     if (ShAmtC != LSB)
10380       return SDValue();
10381 
10382     Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
10383                       DAG.getConstant(~Mask, DL, MVT::i32));
10384 
10385     // Do not add new nodes to DAG combiner worklist.
10386     DCI.CombineTo(N, Res, false);
10387   }
10388 
10389   return SDValue();
10390 }
10391 
10392 static SDValue PerformXORCombine(SDNode *N,
10393                                  TargetLowering::DAGCombinerInfo &DCI,
10394                                  const ARMSubtarget *Subtarget) {
10395   EVT VT = N->getValueType(0);
10396   SelectionDAG &DAG = DCI.DAG;
10397 
10398   if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
10399     return SDValue();
10400 
10401   if (!Subtarget->isThumb1Only()) {
10402     // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
10403     if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
10404       return Result;
10405   }
10406 
10407   return SDValue();
10408 }
10409 
10410 // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it,
10411 // and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and
10412 // their position in "to" (Rd).
10413 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
10414   assert(N->getOpcode() == ARMISD::BFI);
10415 
10416   SDValue From = N->getOperand(1);
10417   ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
10418   FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());
10419 
10420   // If the Base came from a SHR #C, we can deduce that it is really testing bit
10421   // #C in the base of the SHR.
10422   if (From->getOpcode() == ISD::SRL &&
10423       isa<ConstantSDNode>(From->getOperand(1))) {
10424     APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
10425     assert(Shift.getLimitedValue() < 32 && "Shift too large!");
10426     FromMask <<= Shift.getLimitedValue(31);
10427     From = From->getOperand(0);
10428   }
10429 
10430   return From;
10431 }
10432 
10433 // If A and B contain one contiguous set of bits, does A | B == A . B?
10434 //
10435 // Neither A nor B must be zero.
10436 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
10437   unsigned LastActiveBitInA =  A.countTrailingZeros();
10438   unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
10439   return LastActiveBitInA - 1 == FirstActiveBitInB;
10440 }
10441 
10442 static SDValue FindBFIToCombineWith(SDNode *N) {
10443   // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can combine with,
10444   // if one exists.
10445   APInt ToMask, FromMask;
10446   SDValue From = ParseBFI(N, ToMask, FromMask);
10447   SDValue To = N->getOperand(0);
10448 
10449   // Now check for a compatible BFI to merge with. We can pass through BFIs that
10450   // aren't compatible, but not if they set the same bit in their destination as
10451   // we do (or that of any BFI we're going to combine with).
10452   SDValue V = To;
10453   APInt CombinedToMask = ToMask;
10454   while (V.getOpcode() == ARMISD::BFI) {
10455     APInt NewToMask, NewFromMask;
10456     SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
10457     if (NewFrom != From) {
10458       // This BFI has a different base. Keep going.
10459       CombinedToMask |= NewToMask;
10460       V = V.getOperand(0);
10461       continue;
10462     }
10463 
10464     // Do the written bits conflict with any we've seen so far?
10465     if ((NewToMask & CombinedToMask).getBoolValue())
10466       // Conflicting bits - bail out because going further is unsafe.
10467       return SDValue();
10468 
10469     // Are the new bits contiguous when combined with the old bits?
10470     if (BitsProperlyConcatenate(ToMask, NewToMask) &&
10471         BitsProperlyConcatenate(FromMask, NewFromMask))
10472       return V;
10473     if (BitsProperlyConcatenate(NewToMask, ToMask) &&
10474         BitsProperlyConcatenate(NewFromMask, FromMask))
10475       return V;
10476 
10477     // We've seen a write to some bits, so track it.
10478     CombinedToMask |= NewToMask;
10479     // Keep going...
10480     V = V.getOperand(0);
10481   }
10482 
10483   return SDValue();
10484 }
10485 
10486 static SDValue PerformBFICombine(SDNode *N,
10487                                  TargetLowering::DAGCombinerInfo &DCI) {
10488   SDValue N1 = N->getOperand(1);
10489   if (N1.getOpcode() == ISD::AND) {
10490     // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
10491     // the bits being cleared by the AND are not demanded by the BFI.
10492     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
10493     if (!N11C)
10494       return SDValue();
10495     unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
10496     unsigned LSB = countTrailingZeros(~InvMask);
10497     unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
10498     assert(Width <
10499                static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
10500            "undefined behavior");
10501     unsigned Mask = (1u << Width) - 1;
10502     unsigned Mask2 = N11C->getZExtValue();
10503     if ((Mask & (~Mask2)) == 0)
10504       return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
10505                              N->getOperand(0), N1.getOperand(0),
10506                              N->getOperand(2));
10507   } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
10508     // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
10509     // Keep track of any consecutive bits set that all come from the same base
10510     // value. We can combine these together into a single BFI.
10511     SDValue CombineBFI = FindBFIToCombineWith(N);
10512     if (CombineBFI == SDValue())
10513       return SDValue();
10514 
10515     // We've found a BFI.
10516     APInt ToMask1, FromMask1;
10517     SDValue From1 = ParseBFI(N, ToMask1, FromMask1);
10518 
10519     APInt ToMask2, FromMask2;
10520     SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
10521     assert(From1 == From2);
10522     (void)From2;
10523 
10524     // First, unlink CombineBFI.
10525     DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
10526     // Then create a new BFI, combining the two together.
10527     APInt NewFromMask = FromMask1 | FromMask2;
10528     APInt NewToMask = ToMask1 | ToMask2;
10529 
10530     EVT VT = N->getValueType(0);
10531     SDLoc dl(N);
10532 
10533     if (NewFromMask[0] == 0)
10534       From1 = DCI.DAG.getNode(
10535         ISD::SRL, dl, VT, From1,
10536         DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
10537     return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
10538                            DCI.DAG.getConstant(~NewToMask, dl, VT));
10539   }
10540   return SDValue();
10541 }
10542 
10543 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for
10544 /// ARMISD::VMOVRRD.
10545 static SDValue PerformVMOVRRDCombine(SDNode *N,
10546                                      TargetLowering::DAGCombinerInfo &DCI,
10547                                      const ARMSubtarget *Subtarget) {
10548   // vmovrrd(vmovdrr x, y) -> x,y
10549   SDValue InDouble = N->getOperand(0);
10550   if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP())
10551     return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
10552 
10553   // vmovrrd(load f64) -> (load i32), (load i32)
10554   SDNode *InNode = InDouble.getNode();
10555   if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
10556       InNode->getValueType(0) == MVT::f64 &&
10557       InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
10558       !cast<LoadSDNode>(InNode)->isVolatile()) {
10559     // TODO: Should this be done for non-FrameIndex operands?
10560     LoadSDNode *LD = cast<LoadSDNode>(InNode);
10561 
10562     SelectionDAG &DAG = DCI.DAG;
10563     SDLoc DL(LD);
10564     SDValue BasePtr = LD->getBasePtr();
10565     SDValue NewLD1 =
10566         DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
10567                     LD->getAlignment(), LD->getMemOperand()->getFlags());
10568 
10569     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
10570                                     DAG.getConstant(4, DL, MVT::i32));
10571     SDValue NewLD2 = DAG.getLoad(
10572         MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, LD->getPointerInfo(),
10573         std::min(4U, LD->getAlignment() / 2), LD->getMemOperand()->getFlags());
10574 
10575     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
10576     if (DCI.DAG.getDataLayout().isBigEndian())
10577       std::swap (NewLD1, NewLD2);
10578     SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
10579     return Result;
10580   }
10581 
10582   return SDValue();
10583 }
10584 
10585 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for
10586 /// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
10587 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
10588   // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
10589   SDValue Op0 = N->getOperand(0);
10590   SDValue Op1 = N->getOperand(1);
10591   if (Op0.getOpcode() == ISD::BITCAST)
10592     Op0 = Op0.getOperand(0);
10593   if (Op1.getOpcode() == ISD::BITCAST)
10594     Op1 = Op1.getOperand(0);
10595   if (Op0.getOpcode() == ARMISD::VMOVRRD &&
10596       Op0.getNode() == Op1.getNode() &&
10597       Op0.getResNo() == 0 && Op1.getResNo() == 1)
10598     return DAG.getNode(ISD::BITCAST, SDLoc(N),
10599                        N->getValueType(0), Op0.getOperand(0));
10600   return SDValue();
10601 }
10602 
10603 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
10604 /// are normal, non-volatile loads.  If so, it is profitable to bitcast an
10605 /// i64 vector to have f64 elements, since the value can then be loaded
10606 /// directly into a VFP register.
10607 static bool hasNormalLoadOperand(SDNode *N) {
10608   unsigned NumElts = N->getValueType(0).getVectorNumElements();
10609   for (unsigned i = 0; i < NumElts; ++i) {
10610     SDNode *Elt = N->getOperand(i).getNode();
10611     if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
10612       return true;
10613   }
10614   return false;
10615 }
10616 
10617 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
10618 /// ISD::BUILD_VECTOR.
10619 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
10620                                           TargetLowering::DAGCombinerInfo &DCI,
10621                                           const ARMSubtarget *Subtarget) {
10622   // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
10623   // VMOVRRD is introduced when legalizing i64 types.  It forces the i64 value
10624   // into a pair of GPRs, which is fine when the value is used as a scalar,
10625   // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
10626   SelectionDAG &DAG = DCI.DAG;
10627   if (N->getNumOperands() == 2)
10628     if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
10629       return RV;
10630 
10631   // Load i64 elements as f64 values so that type legalization does not split
10632   // them up into i32 values.
10633   EVT VT = N->getValueType(0);
10634   if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
10635     return SDValue();
10636   SDLoc dl(N);
10637   SmallVector<SDValue, 8> Ops;
10638   unsigned NumElts = VT.getVectorNumElements();
10639   for (unsigned i = 0; i < NumElts; ++i) {
10640     SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
10641     Ops.push_back(V);
10642     // Make the DAGCombiner fold the bitcast.
10643     DCI.AddToWorklist(V.getNode());
10644   }
10645   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
10646   SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
10647   return DAG.getNode(ISD::BITCAST, dl, VT, BV);
10648 }
10649 
10650 /// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
10651 static SDValue
10652 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
10653   // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
10654   // At that time, we may have inserted bitcasts from integer to float.
10655   // If these bitcasts have survived DAGCombine, change the lowering of this
10656   // BUILD_VECTOR in something more vector friendly, i.e., that does not
10657   // force to use floating point types.
10658 
10659   // Make sure we can change the type of the vector.
10660   // This is possible iff:
10661   // 1. The vector is only used in a bitcast to a integer type. I.e.,
10662   //    1.1. Vector is used only once.
10663   //    1.2. Use is a bit convert to an integer type.
10664   // 2. The size of its operands are 32-bits (64-bits are not legal).
10665   EVT VT = N->getValueType(0);
10666   EVT EltVT = VT.getVectorElementType();
10667 
10668   // Check 1.1. and 2.
10669   if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
10670     return SDValue();
10671 
10672   // By construction, the input type must be float.
10673   assert(EltVT == MVT::f32 && "Unexpected type!");
10674 
10675   // Check 1.2.
10676   SDNode *Use = *N->use_begin();
10677   if (Use->getOpcode() != ISD::BITCAST ||
10678       Use->getValueType(0).isFloatingPoint())
10679     return SDValue();
10680 
10681   // Check profitability.
10682   // Model is, if more than half of the relevant operands are bitcast from
10683   // i32, turn the build_vector into a sequence of insert_vector_elt.
10684   // Relevant operands are everything that is not statically
10685   // (i.e., at compile time) bitcasted.
10686   unsigned NumOfBitCastedElts = 0;
10687   unsigned NumElts = VT.getVectorNumElements();
10688   unsigned NumOfRelevantElts = NumElts;
10689   for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
10690     SDValue Elt = N->getOperand(Idx);
10691     if (Elt->getOpcode() == ISD::BITCAST) {
10692       // Assume only bit cast to i32 will go away.
10693       if (Elt->getOperand(0).getValueType() == MVT::i32)
10694         ++NumOfBitCastedElts;
10695     } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
10696       // Constants are statically casted, thus do not count them as
10697       // relevant operands.
10698       --NumOfRelevantElts;
10699   }
10700 
10701   // Check if more than half of the elements require a non-free bitcast.
10702   if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
10703     return SDValue();
10704 
10705   SelectionDAG &DAG = DCI.DAG;
10706   // Create the new vector type.
10707   EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
10708   // Check if the type is legal.
10709   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10710   if (!TLI.isTypeLegal(VecVT))
10711     return SDValue();
10712 
10713   // Combine:
10714   // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
10715   // => BITCAST INSERT_VECTOR_ELT
10716   //                      (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
10717   //                      (BITCAST EN), N.
10718   SDValue Vec = DAG.getUNDEF(VecVT);
10719   SDLoc dl(N);
10720   for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
10721     SDValue V = N->getOperand(Idx);
10722     if (V.isUndef())
10723       continue;
10724     if (V.getOpcode() == ISD::BITCAST &&
10725         V->getOperand(0).getValueType() == MVT::i32)
10726       // Fold obvious case.
10727       V = V.getOperand(0);
10728     else {
10729       V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
10730       // Make the DAGCombiner fold the bitcasts.
10731       DCI.AddToWorklist(V.getNode());
10732     }
10733     SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
10734     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
10735   }
10736   Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
10737   // Make the DAGCombiner fold the bitcasts.
10738   DCI.AddToWorklist(Vec.getNode());
10739   return Vec;
10740 }
10741 
10742 /// PerformInsertEltCombine - Target-specific dag combine xforms for
10743 /// ISD::INSERT_VECTOR_ELT.
10744 static SDValue PerformInsertEltCombine(SDNode *N,
10745                                        TargetLowering::DAGCombinerInfo &DCI) {
10746   // Bitcast an i64 load inserted into a vector to f64.
10747   // Otherwise, the i64 value will be legalized to a pair of i32 values.
10748   EVT VT = N->getValueType(0);
10749   SDNode *Elt = N->getOperand(1).getNode();
10750   if (VT.getVectorElementType() != MVT::i64 ||
10751       !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
10752     return SDValue();
10753 
10754   SelectionDAG &DAG = DCI.DAG;
10755   SDLoc dl(N);
10756   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
10757                                  VT.getVectorNumElements());
10758   SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
10759   SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
10760   // Make the DAGCombiner fold the bitcasts.
10761   DCI.AddToWorklist(Vec.getNode());
10762   DCI.AddToWorklist(V.getNode());
10763   SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
10764                                Vec, V, N->getOperand(2));
10765   return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
10766 }
10767 
10768 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
10769 /// ISD::VECTOR_SHUFFLE.
10770 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
10771   // The LLVM shufflevector instruction does not require the shuffle mask
10772   // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
10773   // have that requirement.  When translating to ISD::VECTOR_SHUFFLE, if the
10774   // operands do not match the mask length, they are extended by concatenating
10775   // them with undef vectors.  That is probably the right thing for other
10776   // targets, but for NEON it is better to concatenate two double-register
10777   // size vector operands into a single quad-register size vector.  Do that
10778   // transformation here:
10779   //   shuffle(concat(v1, undef), concat(v2, undef)) ->
10780   //   shuffle(concat(v1, v2), undef)
10781   SDValue Op0 = N->getOperand(0);
10782   SDValue Op1 = N->getOperand(1);
10783   if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
10784       Op1.getOpcode() != ISD::CONCAT_VECTORS ||
10785       Op0.getNumOperands() != 2 ||
10786       Op1.getNumOperands() != 2)
10787     return SDValue();
10788   SDValue Concat0Op1 = Op0.getOperand(1);
10789   SDValue Concat1Op1 = Op1.getOperand(1);
10790   if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
10791     return SDValue();
10792   // Skip the transformation if any of the types are illegal.
10793   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10794   EVT VT = N->getValueType(0);
10795   if (!TLI.isTypeLegal(VT) ||
10796       !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
10797       !TLI.isTypeLegal(Concat1Op1.getValueType()))
10798     return SDValue();
10799 
10800   SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
10801                                   Op0.getOperand(0), Op1.getOperand(0));
10802   // Translate the shuffle mask.
10803   SmallVector<int, 16> NewMask;
10804   unsigned NumElts = VT.getVectorNumElements();
10805   unsigned HalfElts = NumElts/2;
10806   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
10807   for (unsigned n = 0; n < NumElts; ++n) {
10808     int MaskElt = SVN->getMaskElt(n);
10809     int NewElt = -1;
10810     if (MaskElt < (int)HalfElts)
10811       NewElt = MaskElt;
10812     else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
10813       NewElt = HalfElts + MaskElt - NumElts;
10814     NewMask.push_back(NewElt);
10815   }
10816   return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
10817                               DAG.getUNDEF(VT), NewMask);
10818 }
10819 
10820 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
10821 /// NEON load/store intrinsics, and generic vector load/stores, to merge
10822 /// base address updates.
10823 /// For generic load/stores, the memory type is assumed to be a vector.
10824 /// The caller is assumed to have checked legality.
10825 static SDValue CombineBaseUpdate(SDNode *N,
10826                                  TargetLowering::DAGCombinerInfo &DCI) {
10827   SelectionDAG &DAG = DCI.DAG;
10828   const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
10829                             N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
10830   const bool isStore = N->getOpcode() == ISD::STORE;
10831   const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
10832   SDValue Addr = N->getOperand(AddrOpIdx);
10833   MemSDNode *MemN = cast<MemSDNode>(N);
10834   SDLoc dl(N);
10835 
10836   // Search for a use of the address operand that is an increment.
10837   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
10838          UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
10839     SDNode *User = *UI;
10840     if (User->getOpcode() != ISD::ADD ||
10841         UI.getUse().getResNo() != Addr.getResNo())
10842       continue;
10843 
10844     // Check that the add is independent of the load/store.  Otherwise, folding
10845     // it would create a cycle.
10846     if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
10847       continue;
10848 
10849     // Find the new opcode for the updating load/store.
10850     bool isLoadOp = true;
10851     bool isLaneOp = false;
10852     unsigned NewOpc = 0;
10853     unsigned NumVecs = 0;
10854     if (isIntrinsic) {
10855       unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
10856       switch (IntNo) {
10857       default: llvm_unreachable("unexpected intrinsic for Neon base update");
10858       case Intrinsic::arm_neon_vld1:     NewOpc = ARMISD::VLD1_UPD;
10859         NumVecs = 1; break;
10860       case Intrinsic::arm_neon_vld2:     NewOpc = ARMISD::VLD2_UPD;
10861         NumVecs = 2; break;
10862       case Intrinsic::arm_neon_vld3:     NewOpc = ARMISD::VLD3_UPD;
10863         NumVecs = 3; break;
10864       case Intrinsic::arm_neon_vld4:     NewOpc = ARMISD::VLD4_UPD;
10865         NumVecs = 4; break;
10866       case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
10867         NumVecs = 2; isLaneOp = true; break;
10868       case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
10869         NumVecs = 3; isLaneOp = true; break;
10870       case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
10871         NumVecs = 4; isLaneOp = true; break;
10872       case Intrinsic::arm_neon_vst1:     NewOpc = ARMISD::VST1_UPD;
10873         NumVecs = 1; isLoadOp = false; break;
10874       case Intrinsic::arm_neon_vst2:     NewOpc = ARMISD::VST2_UPD;
10875         NumVecs = 2; isLoadOp = false; break;
10876       case Intrinsic::arm_neon_vst3:     NewOpc = ARMISD::VST3_UPD;
10877         NumVecs = 3; isLoadOp = false; break;
10878       case Intrinsic::arm_neon_vst4:     NewOpc = ARMISD::VST4_UPD;
10879         NumVecs = 4; isLoadOp = false; break;
10880       case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
10881         NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
10882       case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
10883         NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
10884       case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
10885         NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
10886       }
10887     } else {
10888       isLaneOp = true;
10889       switch (N->getOpcode()) {
10890       default: llvm_unreachable("unexpected opcode for Neon base update");
10891       case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break;
10892       case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
10893       case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
10894       case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
10895       case ISD::LOAD:       NewOpc = ARMISD::VLD1_UPD;
10896         NumVecs = 1; isLaneOp = false; break;
10897       case ISD::STORE:      NewOpc = ARMISD::VST1_UPD;
10898         NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
10899       }
10900     }
10901 
10902     // Find the size of memory referenced by the load/store.
10903     EVT VecTy;
10904     if (isLoadOp) {
10905       VecTy = N->getValueType(0);
10906     } else if (isIntrinsic) {
10907       VecTy = N->getOperand(AddrOpIdx+1).getValueType();
10908     } else {
10909       assert(isStore && "Node has to be a load, a store, or an intrinsic!");
10910       VecTy = N->getOperand(1).getValueType();
10911     }
10912 
10913     unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
10914     if (isLaneOp)
10915       NumBytes /= VecTy.getVectorNumElements();
10916 
10917     // If the increment is a constant, it must match the memory ref size.
10918     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
10919     ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
10920     if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) {
10921       // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
10922       // separate instructions that make it harder to use a non-constant update.
10923       continue;
10924     }
10925 
10926     // OK, we found an ADD we can fold into the base update.
10927     // Now, create a _UPD node, taking care of not breaking alignment.
10928 
10929     EVT AlignedVecTy = VecTy;
10930     unsigned Alignment = MemN->getAlignment();
10931 
10932     // If this is a less-than-standard-aligned load/store, change the type to
10933     // match the standard alignment.
10934     // The alignment is overlooked when selecting _UPD variants; and it's
10935     // easier to introduce bitcasts here than fix that.
10936     // There are 3 ways to get to this base-update combine:
10937     // - intrinsics: they are assumed to be properly aligned (to the standard
10938     //   alignment of the memory type), so we don't need to do anything.
10939     // - ARMISD::VLDx nodes: they are only generated from the aforementioned
10940     //   intrinsics, so, likewise, there's nothing to do.
10941     // - generic load/store instructions: the alignment is specified as an
10942     //   explicit operand, rather than implicitly as the standard alignment
10943     //   of the memory type (like the intrisics).  We need to change the
10944     //   memory type to match the explicit alignment.  That way, we don't
10945     //   generate non-standard-aligned ARMISD::VLDx nodes.
10946     if (isa<LSBaseSDNode>(N)) {
10947       if (Alignment == 0)
10948         Alignment = 1;
10949       if (Alignment < VecTy.getScalarSizeInBits() / 8) {
10950         MVT EltTy = MVT::getIntegerVT(Alignment * 8);
10951         assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
10952         assert(!isLaneOp && "Unexpected generic load/store lane.");
10953         unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
10954         AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
10955       }
10956       // Don't set an explicit alignment on regular load/stores that we want
10957       // to transform to VLD/VST 1_UPD nodes.
10958       // This matches the behavior of regular load/stores, which only get an
10959       // explicit alignment if the MMO alignment is larger than the standard
10960       // alignment of the memory type.
10961       // Intrinsics, however, always get an explicit alignment, set to the
10962       // alignment of the MMO.
10963       Alignment = 1;
10964     }
10965 
10966     // Create the new updating load/store node.
10967     // First, create an SDVTList for the new updating node's results.
10968     EVT Tys[6];
10969     unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
10970     unsigned n;
10971     for (n = 0; n < NumResultVecs; ++n)
10972       Tys[n] = AlignedVecTy;
10973     Tys[n++] = MVT::i32;
10974     Tys[n] = MVT::Other;
10975     SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));
10976 
10977     // Then, gather the new node's operands.
10978     SmallVector<SDValue, 8> Ops;
10979     Ops.push_back(N->getOperand(0)); // incoming chain
10980     Ops.push_back(N->getOperand(AddrOpIdx));
10981     Ops.push_back(Inc);
10982 
10983     if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
10984       // Try to match the intrinsic's signature
10985       Ops.push_back(StN->getValue());
10986     } else {
10987       // Loads (and of course intrinsics) match the intrinsics' signature,
10988       // so just add all but the alignment operand.
10989       for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i)
10990         Ops.push_back(N->getOperand(i));
10991     }
10992 
10993     // For all node types, the alignment operand is always the last one.
10994     Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));
10995 
10996     // If this is a non-standard-aligned STORE, the penultimate operand is the
10997     // stored value.  Bitcast it to the aligned type.
10998     if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
10999       SDValue &StVal = Ops[Ops.size()-2];
11000       StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
11001     }
11002 
11003     EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy;
11004     SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT,
11005                                            MemN->getMemOperand());
11006 
11007     // Update the uses.
11008     SmallVector<SDValue, 5> NewResults;
11009     for (unsigned i = 0; i < NumResultVecs; ++i)
11010       NewResults.push_back(SDValue(UpdN.getNode(), i));
11011 
11012     // If this is an non-standard-aligned LOAD, the first result is the loaded
11013     // value.  Bitcast it to the expected result type.
11014     if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
11015       SDValue &LdVal = NewResults[0];
11016       LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
11017     }
11018 
11019     NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
11020     DCI.CombineTo(N, NewResults);
11021     DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
11022 
11023     break;
11024   }
11025   return SDValue();
11026 }
11027 
11028 static SDValue PerformVLDCombine(SDNode *N,
11029                                  TargetLowering::DAGCombinerInfo &DCI) {
11030   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
11031     return SDValue();
11032 
11033   return CombineBaseUpdate(N, DCI);
11034 }
11035 
11036 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
11037 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
11038 /// are also VDUPLANEs.  If so, combine them to a vldN-dup operation and
11039 /// return true.
11040 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
11041   SelectionDAG &DAG = DCI.DAG;
11042   EVT VT = N->getValueType(0);
11043   // vldN-dup instructions only support 64-bit vectors for N > 1.
11044   if (!VT.is64BitVector())
11045     return false;
11046 
11047   // Check if the VDUPLANE operand is a vldN-dup intrinsic.
11048   SDNode *VLD = N->getOperand(0).getNode();
11049   if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
11050     return false;
11051   unsigned NumVecs = 0;
11052   unsigned NewOpc = 0;
11053   unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
11054   if (IntNo == Intrinsic::arm_neon_vld2lane) {
11055     NumVecs = 2;
11056     NewOpc = ARMISD::VLD2DUP;
11057   } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
11058     NumVecs = 3;
11059     NewOpc = ARMISD::VLD3DUP;
11060   } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
11061     NumVecs = 4;
11062     NewOpc = ARMISD::VLD4DUP;
11063   } else {
11064     return false;
11065   }
11066 
11067   // First check that all the vldN-lane uses are VDUPLANEs and that the lane
11068   // numbers match the load.
11069   unsigned VLDLaneNo =
11070     cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
11071   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
11072        UI != UE; ++UI) {
11073     // Ignore uses of the chain result.
11074     if (UI.getUse().getResNo() == NumVecs)
11075       continue;
11076     SDNode *User = *UI;
11077     if (User->getOpcode() != ARMISD::VDUPLANE ||
11078         VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
11079       return false;
11080   }
11081 
11082   // Create the vldN-dup node.
11083   EVT Tys[5];
11084   unsigned n;
11085   for (n = 0; n < NumVecs; ++n)
11086     Tys[n] = VT;
11087   Tys[n] = MVT::Other;
11088   SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1));
11089   SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
11090   MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
11091   SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
11092                                            Ops, VLDMemInt->getMemoryVT(),
11093                                            VLDMemInt->getMemOperand());
11094 
11095   // Update the uses.
11096   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
11097        UI != UE; ++UI) {
11098     unsigned ResNo = UI.getUse().getResNo();
11099     // Ignore uses of the chain result.
11100     if (ResNo == NumVecs)
11101       continue;
11102     SDNode *User = *UI;
11103     DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
11104   }
11105 
11106   // Now the vldN-lane intrinsic is dead except for its chain result.
11107   // Update uses of the chain.
11108   std::vector<SDValue> VLDDupResults;
11109   for (unsigned n = 0; n < NumVecs; ++n)
11110     VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
11111   VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
11112   DCI.CombineTo(VLD, VLDDupResults);
11113 
11114   return true;
11115 }
11116 
11117 /// PerformVDUPLANECombine - Target-specific dag combine xforms for
11118 /// ARMISD::VDUPLANE.
11119 static SDValue PerformVDUPLANECombine(SDNode *N,
11120                                       TargetLowering::DAGCombinerInfo &DCI) {
11121   SDValue Op = N->getOperand(0);
11122 
11123   // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
11124   // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
11125   if (CombineVLDDUP(N, DCI))
11126     return SDValue(N, 0);
11127 
11128   // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
11129   // redundant.  Ignore bit_converts for now; element sizes are checked below.
11130   while (Op.getOpcode() == ISD::BITCAST)
11131     Op = Op.getOperand(0);
11132   if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
11133     return SDValue();
11134 
11135   // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
11136   unsigned EltSize = Op.getScalarValueSizeInBits();
11137   // The canonical VMOV for a zero vector uses a 32-bit element size.
11138   unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
11139   unsigned EltBits;
11140   if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
11141     EltSize = 8;
11142   EVT VT = N->getValueType(0);
11143   if (EltSize > VT.getScalarSizeInBits())
11144     return SDValue();
11145 
11146   return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
11147 }
11148 
11149 /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
11150 static SDValue PerformVDUPCombine(SDNode *N,
11151                                   TargetLowering::DAGCombinerInfo &DCI) {
11152   SelectionDAG &DAG = DCI.DAG;
11153   SDValue Op = N->getOperand(0);
11154 
11155   // Match VDUP(LOAD) -> VLD1DUP.
11156   // We match this pattern here rather than waiting for isel because the
11157   // transform is only legal for unindexed loads.
11158   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
11159   if (LD && Op.hasOneUse() && LD->isUnindexed() &&
11160       LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
11161     SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
11162                       DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) };
11163     SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
11164     SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys,
11165                                              Ops, LD->getMemoryVT(),
11166                                              LD->getMemOperand());
11167     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
11168     return VLDDup;
11169   }
11170 
11171   return SDValue();
11172 }
11173 
11174 static SDValue PerformLOADCombine(SDNode *N,
11175                                   TargetLowering::DAGCombinerInfo &DCI) {
11176   EVT VT = N->getValueType(0);
11177 
11178   // If this is a legal vector load, try to combine it into a VLD1_UPD.
11179   if (ISD::isNormalLoad(N) && VT.isVector() &&
11180       DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
11181     return CombineBaseUpdate(N, DCI);
11182 
11183   return SDValue();
11184 }
11185 
11186 /// PerformSTORECombine - Target-specific dag combine xforms for
11187 /// ISD::STORE.
11188 static SDValue PerformSTORECombine(SDNode *N,
11189                                    TargetLowering::DAGCombinerInfo &DCI) {
11190   StoreSDNode *St = cast<StoreSDNode>(N);
11191   if (St->isVolatile())
11192     return SDValue();
11193 
11194   // Optimize trunc store (of multiple scalars) to shuffle and store.  First,
11195   // pack all of the elements in one place.  Next, store to memory in fewer
11196   // chunks.
11197   SDValue StVal = St->getValue();
11198   EVT VT = StVal.getValueType();
11199   if (St->isTruncatingStore() && VT.isVector()) {
11200     SelectionDAG &DAG = DCI.DAG;
11201     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11202     EVT StVT = St->getMemoryVT();
11203     unsigned NumElems = VT.getVectorNumElements();
11204     assert(StVT != VT && "Cannot truncate to the same type");
11205     unsigned FromEltSz = VT.getScalarSizeInBits();
11206     unsigned ToEltSz = StVT.getScalarSizeInBits();
11207 
11208     // From, To sizes and ElemCount must be pow of two
11209     if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();
11210 
11211     // We are going to use the original vector elt for storing.
11212     // Accumulated smaller vector elements must be a multiple of the store size.
11213     if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue();
11214 
11215     unsigned SizeRatio  = FromEltSz / ToEltSz;
11216     assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
11217 
11218     // Create a type on which we perform the shuffle.
11219     EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
11220                                      NumElems*SizeRatio);
11221     assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
11222 
11223     SDLoc DL(St);
11224     SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
11225     SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
11226     for (unsigned i = 0; i < NumElems; ++i)
11227       ShuffleVec[i] = DAG.getDataLayout().isBigEndian()
11228                           ? (i + 1) * SizeRatio - 1
11229                           : i * SizeRatio;
11230 
11231     // Can't shuffle using an illegal type.
11232     if (!TLI.isTypeLegal(WideVecVT)) return SDValue();
11233 
11234     SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec,
11235                                 DAG.getUNDEF(WideVec.getValueType()),
11236                                 ShuffleVec);
11237     // At this point all of the data is stored at the bottom of the
11238     // register. We now need to save it to mem.
11239 
11240     // Find the largest store unit
11241     MVT StoreType = MVT::i8;
11242     for (MVT Tp : MVT::integer_valuetypes()) {
11243       if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
11244         StoreType = Tp;
11245     }
11246     // Didn't find a legal store type.
11247     if (!TLI.isTypeLegal(StoreType))
11248       return SDValue();
11249 
11250     // Bitcast the original vector into a vector of store-size units
11251     EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
11252             StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
11253     assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
11254     SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
11255     SmallVector<SDValue, 8> Chains;
11256     SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
11257                                         TLI.getPointerTy(DAG.getDataLayout()));
11258     SDValue BasePtr = St->getBasePtr();
11259 
11260     // Perform one or more big stores into memory.
11261     unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits();
11262     for (unsigned I = 0; I < E; I++) {
11263       SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
11264                                    StoreType, ShuffWide,
11265                                    DAG.getIntPtrConstant(I, DL));
11266       SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
11267                                 St->getPointerInfo(), St->getAlignment(),
11268                                 St->getMemOperand()->getFlags());
11269       BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
11270                             Increment);
11271       Chains.push_back(Ch);
11272     }
11273     return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
11274   }
11275 
11276   if (!ISD::isNormalStore(St))
11277     return SDValue();
11278 
11279   // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
11280   // ARM stores of arguments in the same cache line.
11281   if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
11282       StVal.getNode()->hasOneUse()) {
11283     SelectionDAG  &DAG = DCI.DAG;
11284     bool isBigEndian = DAG.getDataLayout().isBigEndian();
11285     SDLoc DL(St);
11286     SDValue BasePtr = St->getBasePtr();
11287     SDValue NewST1 = DAG.getStore(
11288         St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
11289         BasePtr, St->getPointerInfo(), St->getAlignment(),
11290         St->getMemOperand()->getFlags());
11291 
11292     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
11293                                     DAG.getConstant(4, DL, MVT::i32));
11294     return DAG.getStore(NewST1.getValue(0), DL,
11295                         StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
11296                         OffsetPtr, St->getPointerInfo(),
11297                         std::min(4U, St->getAlignment() / 2),
11298                         St->getMemOperand()->getFlags());
11299   }
11300 
11301   if (StVal.getValueType() == MVT::i64 &&
11302       StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
11303 
11304     // Bitcast an i64 store extracted from a vector to f64.
11305     // Otherwise, the i64 value will be legalized to a pair of i32 values.
11306     SelectionDAG &DAG = DCI.DAG;
11307     SDLoc dl(StVal);
11308     SDValue IntVec = StVal.getOperand(0);
11309     EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
11310                                    IntVec.getValueType().getVectorNumElements());
11311     SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
11312     SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
11313                                  Vec, StVal.getOperand(1));
11314     dl = SDLoc(N);
11315     SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
11316     // Make the DAGCombiner fold the bitcasts.
11317     DCI.AddToWorklist(Vec.getNode());
11318     DCI.AddToWorklist(ExtElt.getNode());
11319     DCI.AddToWorklist(V.getNode());
11320     return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
11321                         St->getPointerInfo(), St->getAlignment(),
11322                         St->getMemOperand()->getFlags(), St->getAAInfo());
11323   }
11324 
11325   // If this is a legal vector store, try to combine it into a VST1_UPD.
11326   if (ISD::isNormalStore(N) && VT.isVector() &&
11327       DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
11328     return CombineBaseUpdate(N, DCI);
11329 
11330   return SDValue();
11331 }
11332 
11333 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
11334 /// can replace combinations of VMUL and VCVT (floating-point to integer)
11335 /// when the VMUL has a constant operand that is a power of 2.
11336 ///
11337 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
11338 ///  vmul.f32        d16, d17, d16
11339 ///  vcvt.s32.f32    d16, d16
11340 /// becomes:
11341 ///  vcvt.s32.f32    d16, d16, #3
11342 static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
11343                                   const ARMSubtarget *Subtarget) {
11344   if (!Subtarget->hasNEON())
11345     return SDValue();
11346 
11347   SDValue Op = N->getOperand(0);
11348   if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
11349       Op.getOpcode() != ISD::FMUL)
11350     return SDValue();
11351 
11352   SDValue ConstVec = Op->getOperand(1);
11353   if (!isa<BuildVectorSDNode>(ConstVec))
11354     return SDValue();
11355 
11356   MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
11357   uint32_t FloatBits = FloatTy.getSizeInBits();
11358   MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
11359   uint32_t IntBits = IntTy.getSizeInBits();
11360   unsigned NumLanes = Op.getValueType().getVectorNumElements();
11361   if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
11362     // These instructions only exist converting from f32 to i32. We can handle
11363     // smaller integers by generating an extra truncate, but larger ones would
11364     // be lossy. We also can't handle more then 4 lanes, since these intructions
11365     // only support v2i32/v4i32 types.
11366     return SDValue();
11367   }
11368 
11369   BitVector UndefElements;
11370   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
11371   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
11372   if (C == -1 || C == 0 || C > 32)
11373     return SDValue();
11374 
11375   SDLoc dl(N);
11376   bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
11377   unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
11378     Intrinsic::arm_neon_vcvtfp2fxu;
11379   SDValue FixConv = DAG.getNode(
11380       ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
11381       DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
11382       DAG.getConstant(C, dl, MVT::i32));
11383 
11384   if (IntBits < FloatBits)
11385     FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);
11386 
11387   return FixConv;
11388 }
11389 
11390 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
11391 /// can replace combinations of VCVT (integer to floating-point) and VDIV
11392 /// when the VDIV has a constant operand that is a power of 2.
11393 ///
11394 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
11395 ///  vcvt.f32.s32    d16, d16
11396 ///  vdiv.f32        d16, d17, d16
11397 /// becomes:
11398 ///  vcvt.f32.s32    d16, d16, #3
11399 static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
11400                                   const ARMSubtarget *Subtarget) {
11401   if (!Subtarget->hasNEON())
11402     return SDValue();
11403 
11404   SDValue Op = N->getOperand(0);
11405   unsigned OpOpcode = Op.getNode()->getOpcode();
11406   if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
11407       (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
11408     return SDValue();
11409 
11410   SDValue ConstVec = N->getOperand(1);
11411   if (!isa<BuildVectorSDNode>(ConstVec))
11412     return SDValue();
11413 
11414   MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
11415   uint32_t FloatBits = FloatTy.getSizeInBits();
11416   MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
11417   uint32_t IntBits = IntTy.getSizeInBits();
11418   unsigned NumLanes = Op.getValueType().getVectorNumElements();
11419   if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
11420     // These instructions only exist converting from i32 to f32. We can handle
11421     // smaller integers by generating an extra extend, but larger ones would
11422     // be lossy. We also can't handle more then 4 lanes, since these intructions
11423     // only support v2i32/v4i32 types.
11424     return SDValue();
11425   }
11426 
11427   BitVector UndefElements;
11428   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
11429   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
11430   if (C == -1 || C == 0 || C > 32)
11431     return SDValue();
11432 
11433   SDLoc dl(N);
11434   bool isSigned = OpOpcode == ISD::SINT_TO_FP;
11435   SDValue ConvInput = Op.getOperand(0);
11436   if (IntBits < FloatBits)
11437     ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
11438                             dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
11439                             ConvInput);
11440 
11441   unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
11442     Intrinsic::arm_neon_vcvtfxu2fp;
11443   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
11444                      Op.getValueType(),
11445                      DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
11446                      ConvInput, DAG.getConstant(C, dl, MVT::i32));
11447 }
11448 
11449 /// Getvshiftimm - Check if this is a valid build_vector for the immediate
11450 /// operand of a vector shift operation, where all the elements of the
11451 /// build_vector must have the same constant integer value.
11452 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
11453   // Ignore bit_converts.
11454   while (Op.getOpcode() == ISD::BITCAST)
11455     Op = Op.getOperand(0);
11456   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
11457   APInt SplatBits, SplatUndef;
11458   unsigned SplatBitSize;
11459   bool HasAnyUndefs;
11460   if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
11461                                       HasAnyUndefs, ElementBits) ||
11462       SplatBitSize > ElementBits)
11463     return false;
11464   Cnt = SplatBits.getSExtValue();
11465   return true;
11466 }
11467 
11468 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
11469 /// operand of a vector shift left operation.  That value must be in the range:
11470 ///   0 <= Value < ElementBits for a left shift; or
11471 ///   0 <= Value <= ElementBits for a long left shift.
11472 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
11473   assert(VT.isVector() && "vector shift count is not a vector type");
11474   int64_t ElementBits = VT.getScalarSizeInBits();
11475   if (! getVShiftImm(Op, ElementBits, Cnt))
11476     return false;
11477   return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
11478 }
11479 
11480 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
11481 /// operand of a vector shift right operation.  For a shift opcode, the value
11482 /// is positive, but for an intrinsic the value count must be negative. The
11483 /// absolute value must be in the range:
11484 ///   1 <= |Value| <= ElementBits for a right shift; or
11485 ///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
11486 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
11487                          int64_t &Cnt) {
11488   assert(VT.isVector() && "vector shift count is not a vector type");
11489   int64_t ElementBits = VT.getScalarSizeInBits();
11490   if (! getVShiftImm(Op, ElementBits, Cnt))
11491     return false;
11492   if (!isIntrinsic)
11493     return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
11494   if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) {
11495     Cnt = -Cnt;
11496     return true;
11497   }
11498   return false;
11499 }
11500 
11501 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
11502 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
11503   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
11504   switch (IntNo) {
11505   default:
11506     // Don't do anything for most intrinsics.
11507     break;
11508 
11509   // Vector shifts: check for immediate versions and lower them.
11510   // Note: This is done during DAG combining instead of DAG legalizing because
11511   // the build_vectors for 64-bit vector element shift counts are generally
11512   // not legal, and it is hard to see their values after they get legalized to
11513   // loads from a constant pool.
11514   case Intrinsic::arm_neon_vshifts:
11515   case Intrinsic::arm_neon_vshiftu:
11516   case Intrinsic::arm_neon_vrshifts:
11517   case Intrinsic::arm_neon_vrshiftu:
11518   case Intrinsic::arm_neon_vrshiftn:
11519   case Intrinsic::arm_neon_vqshifts:
11520   case Intrinsic::arm_neon_vqshiftu:
11521   case Intrinsic::arm_neon_vqshiftsu:
11522   case Intrinsic::arm_neon_vqshiftns:
11523   case Intrinsic::arm_neon_vqshiftnu:
11524   case Intrinsic::arm_neon_vqshiftnsu:
11525   case Intrinsic::arm_neon_vqrshiftns:
11526   case Intrinsic::arm_neon_vqrshiftnu:
11527   case Intrinsic::arm_neon_vqrshiftnsu: {
11528     EVT VT = N->getOperand(1).getValueType();
11529     int64_t Cnt;
11530     unsigned VShiftOpc = 0;
11531 
11532     switch (IntNo) {
11533     case Intrinsic::arm_neon_vshifts:
11534     case Intrinsic::arm_neon_vshiftu:
11535       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
11536         VShiftOpc = ARMISD::VSHL;
11537         break;
11538       }
11539       if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
11540         VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
11541                      ARMISD::VSHRs : ARMISD::VSHRu);
11542         break;
11543       }
11544       return SDValue();
11545 
11546     case Intrinsic::arm_neon_vrshifts:
11547     case Intrinsic::arm_neon_vrshiftu:
11548       if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
11549         break;
11550       return SDValue();
11551 
11552     case Intrinsic::arm_neon_vqshifts:
11553     case Intrinsic::arm_neon_vqshiftu:
11554       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
11555         break;
11556       return SDValue();
11557 
11558     case Intrinsic::arm_neon_vqshiftsu:
11559       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
11560         break;
11561       llvm_unreachable("invalid shift count for vqshlu intrinsic");
11562 
11563     case Intrinsic::arm_neon_vrshiftn:
11564     case Intrinsic::arm_neon_vqshiftns:
11565     case Intrinsic::arm_neon_vqshiftnu:
11566     case Intrinsic::arm_neon_vqshiftnsu:
11567     case Intrinsic::arm_neon_vqrshiftns:
11568     case Intrinsic::arm_neon_vqrshiftnu:
11569     case Intrinsic::arm_neon_vqrshiftnsu:
11570       // Narrowing shifts require an immediate right shift.
11571       if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
11572         break;
11573       llvm_unreachable("invalid shift count for narrowing vector shift "
11574                        "intrinsic");
11575 
11576     default:
11577       llvm_unreachable("unhandled vector shift");
11578     }
11579 
11580     switch (IntNo) {
11581     case Intrinsic::arm_neon_vshifts:
11582     case Intrinsic::arm_neon_vshiftu:
11583       // Opcode already set above.
11584       break;
11585     case Intrinsic::arm_neon_vrshifts:
11586       VShiftOpc = ARMISD::VRSHRs; break;
11587     case Intrinsic::arm_neon_vrshiftu:
11588       VShiftOpc = ARMISD::VRSHRu; break;
11589     case Intrinsic::arm_neon_vrshiftn:
11590       VShiftOpc = ARMISD::VRSHRN; break;
11591     case Intrinsic::arm_neon_vqshifts:
11592       VShiftOpc = ARMISD::VQSHLs; break;
11593     case Intrinsic::arm_neon_vqshiftu:
11594       VShiftOpc = ARMISD::VQSHLu; break;
11595     case Intrinsic::arm_neon_vqshiftsu:
11596       VShiftOpc = ARMISD::VQSHLsu; break;
11597     case Intrinsic::arm_neon_vqshiftns:
11598       VShiftOpc = ARMISD::VQSHRNs; break;
11599     case Intrinsic::arm_neon_vqshiftnu:
11600       VShiftOpc = ARMISD::VQSHRNu; break;
11601     case Intrinsic::arm_neon_vqshiftnsu:
11602       VShiftOpc = ARMISD::VQSHRNsu; break;
11603     case Intrinsic::arm_neon_vqrshiftns:
11604       VShiftOpc = ARMISD::VQRSHRNs; break;
11605     case Intrinsic::arm_neon_vqrshiftnu:
11606       VShiftOpc = ARMISD::VQRSHRNu; break;
11607     case Intrinsic::arm_neon_vqrshiftnsu:
11608       VShiftOpc = ARMISD::VQRSHRNsu; break;
11609     }
11610 
11611     SDLoc dl(N);
11612     return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
11613                        N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
11614   }
11615 
11616   case Intrinsic::arm_neon_vshiftins: {
11617     EVT VT = N->getOperand(1).getValueType();
11618     int64_t Cnt;
11619     unsigned VShiftOpc = 0;
11620 
11621     if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
11622       VShiftOpc = ARMISD::VSLI;
11623     else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
11624       VShiftOpc = ARMISD::VSRI;
11625     else {
11626       llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
11627     }
11628 
11629     SDLoc dl(N);
11630     return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
11631                        N->getOperand(1), N->getOperand(2),
11632                        DAG.getConstant(Cnt, dl, MVT::i32));
11633   }
11634 
11635   case Intrinsic::arm_neon_vqrshifts:
11636   case Intrinsic::arm_neon_vqrshiftu:
11637     // No immediate versions of these to check for.
11638     break;
11639   }
11640 
11641   return SDValue();
11642 }
11643 
11644 /// PerformShiftCombine - Checks for immediate versions of vector shifts and
11645 /// lowers them.  As with the vector shift intrinsics, this is done during DAG
11646 /// combining instead of DAG legalizing because the build_vectors for 64-bit
11647 /// vector element shift counts are generally not legal, and it is hard to see
11648 /// their values after they get legalized to loads from a constant pool.
11649 static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
11650                                    const ARMSubtarget *ST) {
11651   EVT VT = N->getValueType(0);
11652   if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
11653     // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
11654     // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
11655     SDValue N1 = N->getOperand(1);
11656     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
11657       SDValue N0 = N->getOperand(0);
11658       if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
11659           DAG.MaskedValueIsZero(N0.getOperand(0),
11660                                 APInt::getHighBitsSet(32, 16)))
11661         return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
11662     }
11663   }
11664 
11665   // Nothing to be done for scalar shifts.
11666   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11667   if (!VT.isVector() || !TLI.isTypeLegal(VT))
11668     return SDValue();
11669 
11670   assert(ST->hasNEON() && "unexpected vector shift");
11671   int64_t Cnt;
11672 
11673   switch (N->getOpcode()) {
11674   default: llvm_unreachable("unexpected shift opcode");
11675 
11676   case ISD::SHL:
11677     if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
11678       SDLoc dl(N);
11679       return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0),
11680                          DAG.getConstant(Cnt, dl, MVT::i32));
11681     }
11682     break;
11683 
11684   case ISD::SRA:
11685   case ISD::SRL:
11686     if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
11687       unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
11688                             ARMISD::VSHRs : ARMISD::VSHRu);
11689       SDLoc dl(N);
11690       return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
11691                          DAG.getConstant(Cnt, dl, MVT::i32));
11692     }
11693   }
11694   return SDValue();
11695 }
11696 
11697 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
11698 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
11699 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
11700                                     const ARMSubtarget *ST) {
11701   SDValue N0 = N->getOperand(0);
11702 
11703   // Check for sign- and zero-extensions of vector extract operations of 8-
11704   // and 16-bit vector elements.  NEON supports these directly.  They are
11705   // handled during DAG combining because type legalization will promote them
11706   // to 32-bit types and it is messy to recognize the operations after that.
11707   if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
11708     SDValue Vec = N0.getOperand(0);
11709     SDValue Lane = N0.getOperand(1);
11710     EVT VT = N->getValueType(0);
11711     EVT EltVT = N0.getValueType();
11712     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11713 
11714     if (VT == MVT::i32 &&
11715         (EltVT == MVT::i8 || EltVT == MVT::i16) &&
11716         TLI.isTypeLegal(Vec.getValueType()) &&
11717         isa<ConstantSDNode>(Lane)) {
11718 
11719       unsigned Opc = 0;
11720       switch (N->getOpcode()) {
11721       default: llvm_unreachable("unexpected opcode");
11722       case ISD::SIGN_EXTEND:
11723         Opc = ARMISD::VGETLANEs;
11724         break;
11725       case ISD::ZERO_EXTEND:
11726       case ISD::ANY_EXTEND:
11727         Opc = ARMISD::VGETLANEu;
11728         break;
11729       }
11730       return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
11731     }
11732   }
11733 
11734   return SDValue();
11735 }
11736 
11737 SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
11738   // If we have a CMOV, OR and AND combination such as:
11739   //   if (x & CN)
11740   //     y |= CM;
11741   //
11742   // And:
11743   //   * CN is a single bit;
11744   //   * All bits covered by CM are known zero in y
11745   //
11746   // Then we can convert this into a sequence of BFI instructions. This will
11747   // always be a win if CM is a single bit, will always be no worse than the
11748   // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
11749   // three bits (due to the extra IT instruction).
11750 
11751   SDValue Op0 = CMOV->getOperand(0);
11752   SDValue Op1 = CMOV->getOperand(1);
11753   auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
11754   auto CC = CCNode->getAPIntValue().getLimitedValue();
11755   SDValue CmpZ = CMOV->getOperand(4);
11756 
11757   // The compare must be against zero.
11758   if (!isNullConstant(CmpZ->getOperand(1)))
11759     return SDValue();
11760 
11761   assert(CmpZ->getOpcode() == ARMISD::CMPZ);
11762   SDValue And = CmpZ->getOperand(0);
11763   if (And->getOpcode() != ISD::AND)
11764     return SDValue();
11765   ConstantSDNode *AndC = dyn_cast<ConstantSDNode>(And->getOperand(1));
11766   if (!AndC || !AndC->getAPIntValue().isPowerOf2())
11767     return SDValue();
11768   SDValue X = And->getOperand(0);
11769 
11770   if (CC == ARMCC::EQ) {
11771     // We're performing an "equal to zero" compare. Swap the operands so we
11772     // canonicalize on a "not equal to zero" compare.
11773     std::swap(Op0, Op1);
11774   } else {
11775     assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
11776   }
11777 
11778   if (Op1->getOpcode() != ISD::OR)
11779     return SDValue();
11780 
11781   ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
11782   if (!OrC)
11783     return SDValue();
11784   SDValue Y = Op1->getOperand(0);
11785 
11786   if (Op0 != Y)
11787     return SDValue();
11788 
11789   // Now, is it profitable to continue?
11790   APInt OrCI = OrC->getAPIntValue();
11791   unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
11792   if (OrCI.countPopulation() > Heuristic)
11793     return SDValue();
11794 
11795   // Lastly, can we determine that the bits defined by OrCI
11796   // are zero in Y?
11797   KnownBits Known;
11798   DAG.computeKnownBits(Y, Known);
11799   if ((OrCI & Known.Zero) != OrCI)
11800     return SDValue();
11801 
11802   // OK, we can do the combine.
11803   SDValue V = Y;
11804   SDLoc dl(X);
11805   EVT VT = X.getValueType();
11806   unsigned BitInX = AndC->getAPIntValue().logBase2();
11807 
11808   if (BitInX != 0) {
11809     // We must shift X first.
11810     X = DAG.getNode(ISD::SRL, dl, VT, X,
11811                     DAG.getConstant(BitInX, dl, VT));
11812   }
11813 
11814   for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
11815        BitInY < NumActiveBits; ++BitInY) {
11816     if (OrCI[BitInY] == 0)
11817       continue;
11818     APInt Mask(VT.getSizeInBits(), 0);
11819     Mask.setBit(BitInY);
11820     V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
11821                     // Confusingly, the operand is an *inverted* mask.
11822                     DAG.getConstant(~Mask, dl, VT));
11823   }
11824 
11825   return V;
11826 }
11827 
11828 /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
11829 SDValue
11830 ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
11831   SDValue Cmp = N->getOperand(4);
11832   if (Cmp.getOpcode() != ARMISD::CMPZ)
11833     // Only looking at NE cases.
11834     return SDValue();
11835 
11836   EVT VT = N->getValueType(0);
11837   SDLoc dl(N);
11838   SDValue LHS = Cmp.getOperand(0);
11839   SDValue RHS = Cmp.getOperand(1);
11840   SDValue Chain = N->getOperand(0);
11841   SDValue BB = N->getOperand(1);
11842   SDValue ARMcc = N->getOperand(2);
11843   ARMCC::CondCodes CC =
11844     (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
11845 
11846   // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
11847   // -> (brcond Chain BB CC CPSR Cmp)
11848   if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
11849       LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
11850       LHS->getOperand(0)->hasOneUse()) {
11851     auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
11852     auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
11853     auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
11854     auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
11855     if ((LHS00C && LHS00C->getZExtValue() == 0) &&
11856         (LHS01C && LHS01C->getZExtValue() == 1) &&
11857         (LHS1C && LHS1C->getZExtValue() == 1) &&
11858         (RHSC && RHSC->getZExtValue() == 0)) {
11859       return DAG.getNode(
11860           ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
11861           LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
11862     }
11863   }
11864 
11865   return SDValue();
11866 }
11867 
11868 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
11869 SDValue
11870 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
11871   SDValue Cmp = N->getOperand(4);
11872   if (Cmp.getOpcode() != ARMISD::CMPZ)
11873     // Only looking at EQ and NE cases.
11874     return SDValue();
11875 
11876   EVT VT = N->getValueType(0);
11877   SDLoc dl(N);
11878   SDValue LHS = Cmp.getOperand(0);
11879   SDValue RHS = Cmp.getOperand(1);
11880   SDValue FalseVal = N->getOperand(0);
11881   SDValue TrueVal = N->getOperand(1);
11882   SDValue ARMcc = N->getOperand(2);
11883   ARMCC::CondCodes CC =
11884     (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
11885 
11886   // BFI is only available on V6T2+.
11887   if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
11888     SDValue R = PerformCMOVToBFICombine(N, DAG);
11889     if (R)
11890       return R;
11891   }
11892 
11893   // Simplify
11894   //   mov     r1, r0
11895   //   cmp     r1, x
11896   //   mov     r0, y
11897   //   moveq   r0, x
11898   // to
11899   //   cmp     r0, x
11900   //   movne   r0, y
11901   //
11902   //   mov     r1, r0
11903   //   cmp     r1, x
11904   //   mov     r0, x
11905   //   movne   r0, y
11906   // to
11907   //   cmp     r0, x
11908   //   movne   r0, y
11909   /// FIXME: Turn this into a target neutral optimization?
11910   SDValue Res;
11911   if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
11912     Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
11913                       N->getOperand(3), Cmp);
11914   } else if (CC == ARMCC::EQ && TrueVal == RHS) {
11915     SDValue ARMcc;
11916     SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
11917     Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
11918                       N->getOperand(3), NewCmp);
11919   }
11920 
11921   // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
11922   // -> (cmov F T CC CPSR Cmp)
11923   if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
11924     auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
11925     auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
11926     auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
11927     if ((LHS0C && LHS0C->getZExtValue() == 0) &&
11928         (LHS1C && LHS1C->getZExtValue() == 1) &&
11929         (RHSC && RHSC->getZExtValue() == 0)) {
11930       return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
11931                          LHS->getOperand(2), LHS->getOperand(3),
11932                          LHS->getOperand(4));
11933     }
11934   }
11935 
11936   if (Res.getNode()) {
11937     KnownBits Known;
11938     DAG.computeKnownBits(SDValue(N,0), Known);
11939     // Capture demanded bits information that would be otherwise lost.
11940     if (Known.Zero == 0xfffffffe)
11941       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11942                         DAG.getValueType(MVT::i1));
11943     else if (Known.Zero == 0xffffff00)
11944       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11945                         DAG.getValueType(MVT::i8));
11946     else if (Known.Zero == 0xffff0000)
11947       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11948                         DAG.getValueType(MVT::i16));
11949   }
11950 
11951   return Res;
11952 }
11953 
11954 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
11955                                              DAGCombinerInfo &DCI) const {
11956   switch (N->getOpcode()) {
11957   default: break;
11958   case ARMISD::ADDE:    return PerformADDECombine(N, DCI, Subtarget);
11959   case ARMISD::UMLAL:   return PerformUMLALCombine(N, DCI.DAG, Subtarget);
11960   case ISD::ADD:        return PerformADDCombine(N, DCI, Subtarget);
11961   case ISD::SUB:        return PerformSUBCombine(N, DCI);
11962   case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
11963   case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
11964   case ISD::XOR:        return PerformXORCombine(N, DCI, Subtarget);
11965   case ISD::AND:        return PerformANDCombine(N, DCI, Subtarget);
11966   case ARMISD::ADDC:
11967   case ARMISD::SUBC:    return PerformAddcSubcCombine(N, DCI.DAG, Subtarget);
11968   case ARMISD::SUBE:    return PerformAddeSubeCombine(N, DCI.DAG, Subtarget);
11969   case ARMISD::BFI:     return PerformBFICombine(N, DCI);
11970   case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
11971   case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
11972   case ISD::STORE:      return PerformSTORECombine(N, DCI);
11973   case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
11974   case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
11975   case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
11976   case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
11977   case ARMISD::VDUP: return PerformVDUPCombine(N, DCI);
11978   case ISD::FP_TO_SINT:
11979   case ISD::FP_TO_UINT:
11980     return PerformVCVTCombine(N, DCI.DAG, Subtarget);
11981   case ISD::FDIV:
11982     return PerformVDIVCombine(N, DCI.DAG, Subtarget);
11983   case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
11984   case ISD::SHL:
11985   case ISD::SRA:
11986   case ISD::SRL:        return PerformShiftCombine(N, DCI.DAG, Subtarget);
11987   case ISD::SIGN_EXTEND:
11988   case ISD::ZERO_EXTEND:
11989   case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
11990   case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
11991   case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
11992   case ISD::LOAD:       return PerformLOADCombine(N, DCI);
11993   case ARMISD::VLD1DUP:
11994   case ARMISD::VLD2DUP:
11995   case ARMISD::VLD3DUP:
11996   case ARMISD::VLD4DUP:
11997     return PerformVLDCombine(N, DCI);
11998   case ARMISD::BUILD_VECTOR:
11999     return PerformARMBUILD_VECTORCombine(N, DCI);
12000   case ARMISD::SMULWB: {
12001     unsigned BitWidth = N->getValueType(0).getSizeInBits();
12002     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
12003     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
12004       return SDValue();
12005     break;
12006   }
12007   case ARMISD::SMULWT: {
12008     unsigned BitWidth = N->getValueType(0).getSizeInBits();
12009     APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
12010     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
12011       return SDValue();
12012     break;
12013   }
12014   case ARMISD::SMLALBB: {
12015     unsigned BitWidth = N->getValueType(0).getSizeInBits();
12016     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
12017     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
12018         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
12019       return SDValue();
12020     break;
12021   }
12022   case ARMISD::SMLALBT: {
12023     unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits();
12024     APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
12025     unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits();
12026     APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
12027     if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) ||
12028         (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI)))
12029       return SDValue();
12030     break;
12031   }
12032   case ARMISD::SMLALTB: {
12033     unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits();
12034     APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
12035     unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits();
12036     APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
12037     if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) ||
12038         (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI)))
12039       return SDValue();
12040     break;
12041   }
12042   case ARMISD::SMLALTT: {
12043     unsigned BitWidth = N->getValueType(0).getSizeInBits();
12044     APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
12045     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
12046         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
12047       return SDValue();
12048     break;
12049   }
12050   case ISD::INTRINSIC_VOID:
12051   case ISD::INTRINSIC_W_CHAIN:
12052     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12053     case Intrinsic::arm_neon_vld1:
12054     case Intrinsic::arm_neon_vld2:
12055     case Intrinsic::arm_neon_vld3:
12056     case Intrinsic::arm_neon_vld4:
12057     case Intrinsic::arm_neon_vld2lane:
12058     case Intrinsic::arm_neon_vld3lane:
12059     case Intrinsic::arm_neon_vld4lane:
12060     case Intrinsic::arm_neon_vst1:
12061     case Intrinsic::arm_neon_vst2:
12062     case Intrinsic::arm_neon_vst3:
12063     case Intrinsic::arm_neon_vst4:
12064     case Intrinsic::arm_neon_vst2lane:
12065     case Intrinsic::arm_neon_vst3lane:
12066     case Intrinsic::arm_neon_vst4lane:
12067       return PerformVLDCombine(N, DCI);
12068     default: break;
12069     }
12070     break;
12071   }
12072   return SDValue();
12073 }
12074 
12075 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
12076                                                           EVT VT) const {
12077   return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
12078 }
12079 
12080 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
12081                                                        unsigned,
12082                                                        unsigned,
12083                                                        bool *Fast) const {
12084   // The AllowsUnaliged flag models the SCTLR.A setting in ARM cpus
12085   bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
12086 
12087   switch (VT.getSimpleVT().SimpleTy) {
12088   default:
12089     return false;
12090   case MVT::i8:
12091   case MVT::i16:
12092   case MVT::i32: {
12093     // Unaligned access can use (for example) LRDB, LRDH, LDR
12094     if (AllowsUnaligned) {
12095       if (Fast)
12096         *Fast = Subtarget->hasV7Ops();
12097       return true;
12098     }
12099     return false;
12100   }
12101   case MVT::f64:
12102   case MVT::v2f64: {
12103     // For any little-endian targets with neon, we can support unaligned ld/st
12104     // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
12105     // A big-endian target may also explicitly support unaligned accesses
12106     if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
12107       if (Fast)
12108         *Fast = true;
12109       return true;
12110     }
12111     return false;
12112   }
12113   }
12114 }
12115 
12116 static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
12117                        unsigned AlignCheck) {
12118   return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
12119           (DstAlign == 0 || DstAlign % AlignCheck == 0));
12120 }
12121 
12122 EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
12123                                            unsigned DstAlign, unsigned SrcAlign,
12124                                            bool IsMemset, bool ZeroMemset,
12125                                            bool MemcpyStrSrc,
12126                                            MachineFunction &MF) const {
12127   const Function *F = MF.getFunction();
12128 
12129   // See if we can use NEON instructions for this...
12130   if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
12131       !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
12132     bool Fast;
12133     if (Size >= 16 &&
12134         (memOpAlign(SrcAlign, DstAlign, 16) ||
12135          (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, &Fast) && Fast))) {
12136       return MVT::v2f64;
12137     } else if (Size >= 8 &&
12138                (memOpAlign(SrcAlign, DstAlign, 8) ||
12139                 (allowsMisalignedMemoryAccesses(MVT::f64, 0, 1, &Fast) &&
12140                  Fast))) {
12141       return MVT::f64;
12142     }
12143   }
12144 
12145   // Let the target-independent logic figure it out.
12146   return MVT::Other;
12147 }
12148 
12149 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
12150   if (Val.getOpcode() != ISD::LOAD)
12151     return false;
12152 
12153   EVT VT1 = Val.getValueType();
12154   if (!VT1.isSimple() || !VT1.isInteger() ||
12155       !VT2.isSimple() || !VT2.isInteger())
12156     return false;
12157 
12158   switch (VT1.getSimpleVT().SimpleTy) {
12159   default: break;
12160   case MVT::i1:
12161   case MVT::i8:
12162   case MVT::i16:
12163     // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
12164     return true;
12165   }
12166 
12167   return false;
12168 }
12169 
12170 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
12171   EVT VT = ExtVal.getValueType();
12172 
12173   if (!isTypeLegal(VT))
12174     return false;
12175 
12176   // Don't create a loadext if we can fold the extension into a wide/long
12177   // instruction.
12178   // If there's more than one user instruction, the loadext is desirable no
12179   // matter what.  There can be two uses by the same instruction.
12180   if (ExtVal->use_empty() ||
12181       !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
12182     return true;
12183 
12184   SDNode *U = *ExtVal->use_begin();
12185   if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
12186        U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
12187     return false;
12188 
12189   return true;
12190 }
12191 
12192 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
12193   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
12194     return false;
12195 
12196   if (!isTypeLegal(EVT::getEVT(Ty1)))
12197     return false;
12198 
12199   assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
12200 
12201   // Assuming the caller doesn't have a zeroext or signext return parameter,
12202   // truncation all the way down to i1 is valid.
12203   return true;
12204 }
12205 
12206 int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
12207                                                 const AddrMode &AM, Type *Ty,
12208                                                 unsigned AS) const {
12209   if (isLegalAddressingMode(DL, AM, Ty, AS)) {
12210     if (Subtarget->hasFPAO())
12211       return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
12212     return 0;
12213   }
12214   return -1;
12215 }
12216 
12217 
12218 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
12219   if (V < 0)
12220     return false;
12221 
12222   unsigned Scale = 1;
12223   switch (VT.getSimpleVT().SimpleTy) {
12224   default: return false;
12225   case MVT::i1:
12226   case MVT::i8:
12227     // Scale == 1;
12228     break;
12229   case MVT::i16:
12230     // Scale == 2;
12231     Scale = 2;
12232     break;
12233   case MVT::i32:
12234     // Scale == 4;
12235     Scale = 4;
12236     break;
12237   }
12238 
12239   if ((V & (Scale - 1)) != 0)
12240     return false;
12241   V /= Scale;
12242   return V == (V & ((1LL << 5) - 1));
12243 }
12244 
12245 static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
12246                                       const ARMSubtarget *Subtarget) {
12247   bool isNeg = false;
12248   if (V < 0) {
12249     isNeg = true;
12250     V = - V;
12251   }
12252 
12253   switch (VT.getSimpleVT().SimpleTy) {
12254   default: return false;
12255   case MVT::i1:
12256   case MVT::i8:
12257   case MVT::i16:
12258   case MVT::i32:
12259     // + imm12 or - imm8
12260     if (isNeg)
12261       return V == (V & ((1LL << 8) - 1));
12262     return V == (V & ((1LL << 12) - 1));
12263   case MVT::f32:
12264   case MVT::f64:
12265     // Same as ARM mode. FIXME: NEON?
12266     if (!Subtarget->hasVFP2())
12267       return false;
12268     if ((V & 3) != 0)
12269       return false;
12270     V >>= 2;
12271     return V == (V & ((1LL << 8) - 1));
12272   }
12273 }
12274 
12275 /// isLegalAddressImmediate - Return true if the integer value can be used
12276 /// as the offset of the target addressing mode for load / store of the
12277 /// given type.
12278 static bool isLegalAddressImmediate(int64_t V, EVT VT,
12279                                     const ARMSubtarget *Subtarget) {
12280   if (V == 0)
12281     return true;
12282 
12283   if (!VT.isSimple())
12284     return false;
12285 
12286   if (Subtarget->isThumb1Only())
12287     return isLegalT1AddressImmediate(V, VT);
12288   else if (Subtarget->isThumb2())
12289     return isLegalT2AddressImmediate(V, VT, Subtarget);
12290 
12291   // ARM mode.
12292   if (V < 0)
12293     V = - V;
12294   switch (VT.getSimpleVT().SimpleTy) {
12295   default: return false;
12296   case MVT::i1:
12297   case MVT::i8:
12298   case MVT::i32:
12299     // +- imm12
12300     return V == (V & ((1LL << 12) - 1));
12301   case MVT::i16:
12302     // +- imm8
12303     return V == (V & ((1LL << 8) - 1));
12304   case MVT::f32:
12305   case MVT::f64:
12306     if (!Subtarget->hasVFP2()) // FIXME: NEON?
12307       return false;
12308     if ((V & 3) != 0)
12309       return false;
12310     V >>= 2;
12311     return V == (V & ((1LL << 8) - 1));
12312   }
12313 }
12314 
12315 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
12316                                                       EVT VT) const {
12317   int Scale = AM.Scale;
12318   if (Scale < 0)
12319     return false;
12320 
12321   switch (VT.getSimpleVT().SimpleTy) {
12322   default: return false;
12323   case MVT::i1:
12324   case MVT::i8:
12325   case MVT::i16:
12326   case MVT::i32:
12327     if (Scale == 1)
12328       return true;
12329     // r + r << imm
12330     Scale = Scale & ~1;
12331     return Scale == 2 || Scale == 4 || Scale == 8;
12332   case MVT::i64:
12333     // r + r
12334     if (((unsigned)AM.HasBaseReg + Scale) <= 2)
12335       return true;
12336     return false;
12337   case MVT::isVoid:
12338     // Note, we allow "void" uses (basically, uses that aren't loads or
12339     // stores), because arm allows folding a scale into many arithmetic
12340     // operations.  This should be made more precise and revisited later.
12341 
12342     // Allow r << imm, but the imm has to be a multiple of two.
12343     if (Scale & 1) return false;
12344     return isPowerOf2_32(Scale);
12345   }
12346 }
12347 
12348 /// isLegalAddressingMode - Return true if the addressing mode represented
12349 /// by AM is legal for this target, for a load/store of the specified type.
12350 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
12351                                               const AddrMode &AM, Type *Ty,
12352                                               unsigned AS) const {
12353   EVT VT = getValueType(DL, Ty, true);
12354   if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
12355     return false;
12356 
12357   // Can never fold addr of global into load/store.
12358   if (AM.BaseGV)
12359     return false;
12360 
12361   switch (AM.Scale) {
12362   case 0:  // no scale reg, must be "r+i" or "r", or "i".
12363     break;
12364   case 1:
12365     if (Subtarget->isThumb1Only())
12366       return false;
12367     LLVM_FALLTHROUGH;
12368   default:
12369     // ARM doesn't support any R+R*scale+imm addr modes.
12370     if (AM.BaseOffs)
12371       return false;
12372 
12373     if (!VT.isSimple())
12374       return false;
12375 
12376     if (Subtarget->isThumb2())
12377       return isLegalT2ScaledAddressingMode(AM, VT);
12378 
12379     int Scale = AM.Scale;
12380     switch (VT.getSimpleVT().SimpleTy) {
12381     default: return false;
12382     case MVT::i1:
12383     case MVT::i8:
12384     case MVT::i32:
12385       if (Scale < 0) Scale = -Scale;
12386       if (Scale == 1)
12387         return true;
12388       // r + r << imm
12389       return isPowerOf2_32(Scale & ~1);
12390     case MVT::i16:
12391     case MVT::i64:
12392       // r + r
12393       if (((unsigned)AM.HasBaseReg + Scale) <= 2)
12394         return true;
12395       return false;
12396 
12397     case MVT::isVoid:
12398       // Note, we allow "void" uses (basically, uses that aren't loads or
12399       // stores), because arm allows folding a scale into many arithmetic
12400       // operations.  This should be made more precise and revisited later.
12401 
12402       // Allow r << imm, but the imm has to be a multiple of two.
12403       if (Scale & 1) return false;
12404       return isPowerOf2_32(Scale);
12405     }
12406   }
12407   return true;
12408 }
12409 
12410 /// isLegalICmpImmediate - Return true if the specified immediate is legal
12411 /// icmp immediate, that is the target has icmp instructions which can compare
12412 /// a register against the immediate without having to materialize the
12413 /// immediate into a register.
12414 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
12415   // Thumb2 and ARM modes can use cmn for negative immediates.
12416   if (!Subtarget->isThumb())
12417     return ARM_AM::getSOImmVal(std::abs(Imm)) != -1;
12418   if (Subtarget->isThumb2())
12419     return ARM_AM::getT2SOImmVal(std::abs(Imm)) != -1;
12420   // Thumb1 doesn't have cmn, and only 8-bit immediates.
12421   return Imm >= 0 && Imm <= 255;
12422 }
12423 
12424 /// isLegalAddImmediate - Return true if the specified immediate is a legal add
12425 /// *or sub* immediate, that is the target has add or sub instructions which can
12426 /// add a register with the immediate without having to materialize the
12427 /// immediate into a register.
12428 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
12429   // Same encoding for add/sub, just flip the sign.
12430   int64_t AbsImm = std::abs(Imm);
12431   if (!Subtarget->isThumb())
12432     return ARM_AM::getSOImmVal(AbsImm) != -1;
12433   if (Subtarget->isThumb2())
12434     return ARM_AM::getT2SOImmVal(AbsImm) != -1;
12435   // Thumb1 only has 8-bit unsigned immediate.
12436   return AbsImm >= 0 && AbsImm <= 255;
12437 }
12438 
12439 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
12440                                       bool isSEXTLoad, SDValue &Base,
12441                                       SDValue &Offset, bool &isInc,
12442                                       SelectionDAG &DAG) {
12443   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
12444     return false;
12445 
12446   if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
12447     // AddressingMode 3
12448     Base = Ptr->getOperand(0);
12449     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12450       int RHSC = (int)RHS->getZExtValue();
12451       if (RHSC < 0 && RHSC > -256) {
12452         assert(Ptr->getOpcode() == ISD::ADD);
12453         isInc = false;
12454         Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
12455         return true;
12456       }
12457     }
12458     isInc = (Ptr->getOpcode() == ISD::ADD);
12459     Offset = Ptr->getOperand(1);
12460     return true;
12461   } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
12462     // AddressingMode 2
12463     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12464       int RHSC = (int)RHS->getZExtValue();
12465       if (RHSC < 0 && RHSC > -0x1000) {
12466         assert(Ptr->getOpcode() == ISD::ADD);
12467         isInc = false;
12468         Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
12469         Base = Ptr->getOperand(0);
12470         return true;
12471       }
12472     }
12473 
12474     if (Ptr->getOpcode() == ISD::ADD) {
12475       isInc = true;
12476       ARM_AM::ShiftOpc ShOpcVal=
12477         ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
12478       if (ShOpcVal != ARM_AM::no_shift) {
12479         Base = Ptr->getOperand(1);
12480         Offset = Ptr->getOperand(0);
12481       } else {
12482         Base = Ptr->getOperand(0);
12483         Offset = Ptr->getOperand(1);
12484       }
12485       return true;
12486     }
12487 
12488     isInc = (Ptr->getOpcode() == ISD::ADD);
12489     Base = Ptr->getOperand(0);
12490     Offset = Ptr->getOperand(1);
12491     return true;
12492   }
12493 
12494   // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
12495   return false;
12496 }
12497 
12498 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
12499                                      bool isSEXTLoad, SDValue &Base,
12500                                      SDValue &Offset, bool &isInc,
12501                                      SelectionDAG &DAG) {
12502   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
12503     return false;
12504 
12505   Base = Ptr->getOperand(0);
12506   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12507     int RHSC = (int)RHS->getZExtValue();
12508     if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
12509       assert(Ptr->getOpcode() == ISD::ADD);
12510       isInc = false;
12511       Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
12512       return true;
12513     } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
12514       isInc = Ptr->getOpcode() == ISD::ADD;
12515       Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
12516       return true;
12517     }
12518   }
12519 
12520   return false;
12521 }
12522 
12523 /// getPreIndexedAddressParts - returns true by value, base pointer and
12524 /// offset pointer and addressing mode by reference if the node's address
12525 /// can be legally represented as pre-indexed load / store address.
12526 bool
12527 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
12528                                              SDValue &Offset,
12529                                              ISD::MemIndexedMode &AM,
12530                                              SelectionDAG &DAG) const {
12531   if (Subtarget->isThumb1Only())
12532     return false;
12533 
12534   EVT VT;
12535   SDValue Ptr;
12536   bool isSEXTLoad = false;
12537   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
12538     Ptr = LD->getBasePtr();
12539     VT  = LD->getMemoryVT();
12540     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
12541   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
12542     Ptr = ST->getBasePtr();
12543     VT  = ST->getMemoryVT();
12544   } else
12545     return false;
12546 
12547   bool isInc;
12548   bool isLegal = false;
12549   if (Subtarget->isThumb2())
12550     isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
12551                                        Offset, isInc, DAG);
12552   else
12553     isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
12554                                         Offset, isInc, DAG);
12555   if (!isLegal)
12556     return false;
12557 
12558   AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
12559   return true;
12560 }
12561 
12562 /// getPostIndexedAddressParts - returns true by value, base pointer and
12563 /// offset pointer and addressing mode by reference if this node can be
12564 /// combined with a load / store to form a post-indexed load / store.
12565 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
12566                                                    SDValue &Base,
12567                                                    SDValue &Offset,
12568                                                    ISD::MemIndexedMode &AM,
12569                                                    SelectionDAG &DAG) const {
12570   EVT VT;
12571   SDValue Ptr;
12572   bool isSEXTLoad = false, isNonExt;
12573   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
12574     VT  = LD->getMemoryVT();
12575     Ptr = LD->getBasePtr();
12576     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
12577     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
12578   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
12579     VT  = ST->getMemoryVT();
12580     Ptr = ST->getBasePtr();
12581     isNonExt = !ST->isTruncatingStore();
12582   } else
12583     return false;
12584 
12585   if (Subtarget->isThumb1Only()) {
12586     // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
12587     // must be non-extending/truncating, i32, with an offset of 4.
12588     assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
12589     if (Op->getOpcode() != ISD::ADD || !isNonExt)
12590       return false;
12591     auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
12592     if (!RHS || RHS->getZExtValue() != 4)
12593       return false;
12594 
12595     Offset = Op->getOperand(1);
12596     Base = Op->getOperand(0);
12597     AM = ISD::POST_INC;
12598     return true;
12599   }
12600 
12601   bool isInc;
12602   bool isLegal = false;
12603   if (Subtarget->isThumb2())
12604     isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
12605                                        isInc, DAG);
12606   else
12607     isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
12608                                         isInc, DAG);
12609   if (!isLegal)
12610     return false;
12611 
12612   if (Ptr != Base) {
12613     // Swap base ptr and offset to catch more post-index load / store when
12614     // it's legal. In Thumb2 mode, offset must be an immediate.
12615     if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
12616         !Subtarget->isThumb2())
12617       std::swap(Base, Offset);
12618 
12619     // Post-indexed load / store update the base pointer.
12620     if (Ptr != Base)
12621       return false;
12622   }
12623 
12624   AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
12625   return true;
12626 }
12627 
12628 void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
12629                                                       KnownBits &Known,
12630                                                       const APInt &DemandedElts,
12631                                                       const SelectionDAG &DAG,
12632                                                       unsigned Depth) const {
12633   unsigned BitWidth = Known.getBitWidth();
12634   Known.resetAll();
12635   switch (Op.getOpcode()) {
12636   default: break;
12637   case ARMISD::ADDC:
12638   case ARMISD::ADDE:
12639   case ARMISD::SUBC:
12640   case ARMISD::SUBE:
12641     // These nodes' second result is a boolean
12642     if (Op.getResNo() == 0)
12643       break;
12644     Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
12645     break;
12646   case ARMISD::CMOV: {
12647     // Bits are known zero/one if known on the LHS and RHS.
12648     DAG.computeKnownBits(Op.getOperand(0), Known, Depth+1);
12649     if (Known.isUnknown())
12650       return;
12651 
12652     KnownBits KnownRHS;
12653     DAG.computeKnownBits(Op.getOperand(1), KnownRHS, Depth+1);
12654     Known.Zero &= KnownRHS.Zero;
12655     Known.One  &= KnownRHS.One;
12656     return;
12657   }
12658   case ISD::INTRINSIC_W_CHAIN: {
12659     ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
12660     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
12661     switch (IntID) {
12662     default: return;
12663     case Intrinsic::arm_ldaex:
12664     case Intrinsic::arm_ldrex: {
12665       EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
12666       unsigned MemBits = VT.getScalarSizeInBits();
12667       Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
12668       return;
12669     }
12670     }
12671   }
12672   case ARMISD::BFI: {
12673     // Conservatively, we can recurse down the first operand
12674     // and just mask out all affected bits.
12675     DAG.computeKnownBits(Op.getOperand(0), Known, Depth + 1);
12676 
12677     // The operand to BFI is already a mask suitable for removing the bits it
12678     // sets.
12679     ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
12680     const APInt &Mask = CI->getAPIntValue();
12681     Known.Zero &= Mask;
12682     Known.One &= Mask;
12683     return;
12684   }
12685   }
12686 }
12687 
12688 //===----------------------------------------------------------------------===//
12689 //                           ARM Inline Assembly Support
12690 //===----------------------------------------------------------------------===//
12691 
12692 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
12693   // Looking for "rev" which is V6+.
12694   if (!Subtarget->hasV6Ops())
12695     return false;
12696 
12697   InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
12698   std::string AsmStr = IA->getAsmString();
12699   SmallVector<StringRef, 4> AsmPieces;
12700   SplitString(AsmStr, AsmPieces, ";\n");
12701 
12702   switch (AsmPieces.size()) {
12703   default: return false;
12704   case 1:
12705     AsmStr = AsmPieces[0];
12706     AsmPieces.clear();
12707     SplitString(AsmStr, AsmPieces, " \t,");
12708 
12709     // rev $0, $1
12710     if (AsmPieces.size() == 3 &&
12711         AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
12712         IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
12713       IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
12714       if (Ty && Ty->getBitWidth() == 32)
12715         return IntrinsicLowering::LowerToByteSwap(CI);
12716     }
12717     break;
12718   }
12719 
12720   return false;
12721 }
12722 
12723 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
12724   // At this point, we have to lower this constraint to something else, so we
12725   // lower it to an "r" or "w". However, by doing this we will force the result
12726   // to be in register, while the X constraint is much more permissive.
12727   //
12728   // Although we are correct (we are free to emit anything, without
12729   // constraints), we might break use cases that would expect us to be more
12730   // efficient and emit something else.
12731   if (!Subtarget->hasVFP2())
12732     return "r";
12733   if (ConstraintVT.isFloatingPoint())
12734     return "w";
12735   if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
12736      (ConstraintVT.getSizeInBits() == 64 ||
12737       ConstraintVT.getSizeInBits() == 128))
12738     return "w";
12739 
12740   return "r";
12741 }
12742 
12743 /// getConstraintType - Given a constraint letter, return the type of
12744 /// constraint it is for this target.
12745 ARMTargetLowering::ConstraintType
12746 ARMTargetLowering::getConstraintType(StringRef Constraint) const {
12747   if (Constraint.size() == 1) {
12748     switch (Constraint[0]) {
12749     default:  break;
12750     case 'l': return C_RegisterClass;
12751     case 'w': return C_RegisterClass;
12752     case 'h': return C_RegisterClass;
12753     case 'x': return C_RegisterClass;
12754     case 't': return C_RegisterClass;
12755     case 'j': return C_Other; // Constant for movw.
12756       // An address with a single base register. Due to the way we
12757       // currently handle addresses it is the same as an 'r' memory constraint.
12758     case 'Q': return C_Memory;
12759     }
12760   } else if (Constraint.size() == 2) {
12761     switch (Constraint[0]) {
12762     default: break;
12763     // All 'U+' constraints are addresses.
12764     case 'U': return C_Memory;
12765     }
12766   }
12767   return TargetLowering::getConstraintType(Constraint);
12768 }
12769 
12770 /// Examine constraint type and operand type and determine a weight value.
12771 /// This object must already have been set up with the operand type
12772 /// and the current alternative constraint selected.
12773 TargetLowering::ConstraintWeight
12774 ARMTargetLowering::getSingleConstraintMatchWeight(
12775     AsmOperandInfo &info, const char *constraint) const {
12776   ConstraintWeight weight = CW_Invalid;
12777   Value *CallOperandVal = info.CallOperandVal;
12778     // If we don't have a value, we can't do a match,
12779     // but allow it at the lowest weight.
12780   if (!CallOperandVal)
12781     return CW_Default;
12782   Type *type = CallOperandVal->getType();
12783   // Look at the constraint type.
12784   switch (*constraint) {
12785   default:
12786     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
12787     break;
12788   case 'l':
12789     if (type->isIntegerTy()) {
12790       if (Subtarget->isThumb())
12791         weight = CW_SpecificReg;
12792       else
12793         weight = CW_Register;
12794     }
12795     break;
12796   case 'w':
12797     if (type->isFloatingPointTy())
12798       weight = CW_Register;
12799     break;
12800   }
12801   return weight;
12802 }
12803 
12804 typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
12805 RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
12806     const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
12807   if (Constraint.size() == 1) {
12808     // GCC ARM Constraint Letters
12809     switch (Constraint[0]) {
12810     case 'l': // Low regs or general regs.
12811       if (Subtarget->isThumb())
12812         return RCPair(0U, &ARM::tGPRRegClass);
12813       return RCPair(0U, &ARM::GPRRegClass);
12814     case 'h': // High regs or no regs.
12815       if (Subtarget->isThumb())
12816         return RCPair(0U, &ARM::hGPRRegClass);
12817       break;
12818     case 'r':
12819       if (Subtarget->isThumb1Only())
12820         return RCPair(0U, &ARM::tGPRRegClass);
12821       return RCPair(0U, &ARM::GPRRegClass);
12822     case 'w':
12823       if (VT == MVT::Other)
12824         break;
12825       if (VT == MVT::f32)
12826         return RCPair(0U, &ARM::SPRRegClass);
12827       if (VT.getSizeInBits() == 64)
12828         return RCPair(0U, &ARM::DPRRegClass);
12829       if (VT.getSizeInBits() == 128)
12830         return RCPair(0U, &ARM::QPRRegClass);
12831       break;
12832     case 'x':
12833       if (VT == MVT::Other)
12834         break;
12835       if (VT == MVT::f32)
12836         return RCPair(0U, &ARM::SPR_8RegClass);
12837       if (VT.getSizeInBits() == 64)
12838         return RCPair(0U, &ARM::DPR_8RegClass);
12839       if (VT.getSizeInBits() == 128)
12840         return RCPair(0U, &ARM::QPR_8RegClass);
12841       break;
12842     case 't':
12843       if (VT == MVT::f32)
12844         return RCPair(0U, &ARM::SPRRegClass);
12845       break;
12846     }
12847   }
12848   if (StringRef("{cc}").equals_lower(Constraint))
12849     return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
12850 
12851   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
12852 }
12853 
12854 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
12855 /// vector.  If it is invalid, don't add anything to Ops.
12856 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
12857                                                      std::string &Constraint,
12858                                                      std::vector<SDValue>&Ops,
12859                                                      SelectionDAG &DAG) const {
12860   SDValue Result;
12861 
12862   // Currently only support length 1 constraints.
12863   if (Constraint.length() != 1) return;
12864 
12865   char ConstraintLetter = Constraint[0];
12866   switch (ConstraintLetter) {
12867   default: break;
12868   case 'j':
12869   case 'I': case 'J': case 'K': case 'L':
12870   case 'M': case 'N': case 'O':
12871     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
12872     if (!C)
12873       return;
12874 
12875     int64_t CVal64 = C->getSExtValue();
12876     int CVal = (int) CVal64;
12877     // None of these constraints allow values larger than 32 bits.  Check
12878     // that the value fits in an int.
12879     if (CVal != CVal64)
12880       return;
12881 
12882     switch (ConstraintLetter) {
12883       case 'j':
12884         // Constant suitable for movw, must be between 0 and
12885         // 65535.
12886         if (Subtarget->hasV6T2Ops())
12887           if (CVal >= 0 && CVal <= 65535)
12888             break;
12889         return;
12890       case 'I':
12891         if (Subtarget->isThumb1Only()) {
12892           // This must be a constant between 0 and 255, for ADD
12893           // immediates.
12894           if (CVal >= 0 && CVal <= 255)
12895             break;
12896         } else if (Subtarget->isThumb2()) {
12897           // A constant that can be used as an immediate value in a
12898           // data-processing instruction.
12899           if (ARM_AM::getT2SOImmVal(CVal) != -1)
12900             break;
12901         } else {
12902           // A constant that can be used as an immediate value in a
12903           // data-processing instruction.
12904           if (ARM_AM::getSOImmVal(CVal) != -1)
12905             break;
12906         }
12907         return;
12908 
12909       case 'J':
12910         if (Subtarget->isThumb1Only()) {
12911           // This must be a constant between -255 and -1, for negated ADD
12912           // immediates. This can be used in GCC with an "n" modifier that
12913           // prints the negated value, for use with SUB instructions. It is
12914           // not useful otherwise but is implemented for compatibility.
12915           if (CVal >= -255 && CVal <= -1)
12916             break;
12917         } else {
12918           // This must be a constant between -4095 and 4095. It is not clear
12919           // what this constraint is intended for. Implemented for
12920           // compatibility with GCC.
12921           if (CVal >= -4095 && CVal <= 4095)
12922             break;
12923         }
12924         return;
12925 
12926       case 'K':
12927         if (Subtarget->isThumb1Only()) {
12928           // A 32-bit value where only one byte has a nonzero value. Exclude
12929           // zero to match GCC. This constraint is used by GCC internally for
12930           // constants that can be loaded with a move/shift combination.
12931           // It is not useful otherwise but is implemented for compatibility.
12932           if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
12933             break;
12934         } else if (Subtarget->isThumb2()) {
12935           // A constant whose bitwise inverse can be used as an immediate
12936           // value in a data-processing instruction. This can be used in GCC
12937           // with a "B" modifier that prints the inverted value, for use with
12938           // BIC and MVN instructions. It is not useful otherwise but is
12939           // implemented for compatibility.
12940           if (ARM_AM::getT2SOImmVal(~CVal) != -1)
12941             break;
12942         } else {
12943           // A constant whose bitwise inverse can be used as an immediate
12944           // value in a data-processing instruction. This can be used in GCC
12945           // with a "B" modifier that prints the inverted value, for use with
12946           // BIC and MVN instructions. It is not useful otherwise but is
12947           // implemented for compatibility.
12948           if (ARM_AM::getSOImmVal(~CVal) != -1)
12949             break;
12950         }
12951         return;
12952 
12953       case 'L':
12954         if (Subtarget->isThumb1Only()) {
12955           // This must be a constant between -7 and 7,
12956           // for 3-operand ADD/SUB immediate instructions.
12957           if (CVal >= -7 && CVal < 7)
12958             break;
12959         } else if (Subtarget->isThumb2()) {
12960           // A constant whose negation can be used as an immediate value in a
12961           // data-processing instruction. This can be used in GCC with an "n"
12962           // modifier that prints the negated value, for use with SUB
12963           // instructions. It is not useful otherwise but is implemented for
12964           // compatibility.
12965           if (ARM_AM::getT2SOImmVal(-CVal) != -1)
12966             break;
12967         } else {
12968           // A constant whose negation can be used as an immediate value in a
12969           // data-processing instruction. This can be used in GCC with an "n"
12970           // modifier that prints the negated value, for use with SUB
12971           // instructions. It is not useful otherwise but is implemented for
12972           // compatibility.
12973           if (ARM_AM::getSOImmVal(-CVal) != -1)
12974             break;
12975         }
12976         return;
12977 
12978       case 'M':
12979         if (Subtarget->isThumb1Only()) {
12980           // This must be a multiple of 4 between 0 and 1020, for
12981           // ADD sp + immediate.
12982           if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
12983             break;
12984         } else {
12985           // A power of two or a constant between 0 and 32.  This is used in
12986           // GCC for the shift amount on shifted register operands, but it is
12987           // useful in general for any shift amounts.
12988           if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
12989             break;
12990         }
12991         return;
12992 
12993       case 'N':
12994         if (Subtarget->isThumb()) {  // FIXME thumb2
12995           // This must be a constant between 0 and 31, for shift amounts.
12996           if (CVal >= 0 && CVal <= 31)
12997             break;
12998         }
12999         return;
13000 
13001       case 'O':
13002         if (Subtarget->isThumb()) {  // FIXME thumb2
13003           // This must be a multiple of 4 between -508 and 508, for
13004           // ADD/SUB sp = sp + immediate.
13005           if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
13006             break;
13007         }
13008         return;
13009     }
13010     Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
13011     break;
13012   }
13013 
13014   if (Result.getNode()) {
13015     Ops.push_back(Result);
13016     return;
13017   }
13018   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
13019 }
13020 
13021 static RTLIB::Libcall getDivRemLibcall(
13022     const SDNode *N, MVT::SimpleValueType SVT) {
13023   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
13024           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
13025          "Unhandled Opcode in getDivRemLibcall");
13026   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
13027                   N->getOpcode() == ISD::SREM;
13028   RTLIB::Libcall LC;
13029   switch (SVT) {
13030   default: llvm_unreachable("Unexpected request for libcall!");
13031   case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
13032   case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
13033   case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
13034   case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
13035   }
13036   return LC;
13037 }
13038 
13039 static TargetLowering::ArgListTy getDivRemArgList(
13040     const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
13041   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
13042           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
13043          "Unhandled Opcode in getDivRemArgList");
13044   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
13045                   N->getOpcode() == ISD::SREM;
13046   TargetLowering::ArgListTy Args;
13047   TargetLowering::ArgListEntry Entry;
13048   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
13049     EVT ArgVT = N->getOperand(i).getValueType();
13050     Type *ArgTy = ArgVT.getTypeForEVT(*Context);
13051     Entry.Node = N->getOperand(i);
13052     Entry.Ty = ArgTy;
13053     Entry.IsSExt = isSigned;
13054     Entry.IsZExt = !isSigned;
13055     Args.push_back(Entry);
13056   }
13057   if (Subtarget->isTargetWindows() && Args.size() >= 2)
13058     std::swap(Args[0], Args[1]);
13059   return Args;
13060 }
13061 
13062 SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
13063   assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
13064           Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
13065           Subtarget->isTargetWindows()) &&
13066          "Register-based DivRem lowering only");
13067   unsigned Opcode = Op->getOpcode();
13068   assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
13069          "Invalid opcode for Div/Rem lowering");
13070   bool isSigned = (Opcode == ISD::SDIVREM);
13071   EVT VT = Op->getValueType(0);
13072   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
13073   SDLoc dl(Op);
13074 
13075   // If the target has hardware divide, use divide + multiply + subtract:
13076   //     div = a / b
13077   //     rem = a - b * div
13078   //     return {div, rem}
13079   // This should be lowered into UDIV/SDIV + MLS later on.
13080   bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
13081                                         : Subtarget->hasDivideInARMMode();
13082   if (hasDivide && Op->getValueType(0).isSimple() &&
13083       Op->getSimpleValueType(0) == MVT::i32) {
13084     unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
13085     const SDValue Dividend = Op->getOperand(0);
13086     const SDValue Divisor = Op->getOperand(1);
13087     SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
13088     SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
13089     SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
13090 
13091     SDValue Values[2] = {Div, Rem};
13092     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
13093   }
13094 
13095   RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
13096                                        VT.getSimpleVT().SimpleTy);
13097   SDValue InChain = DAG.getEntryNode();
13098 
13099   TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
13100                                                     DAG.getContext(),
13101                                                     Subtarget);
13102 
13103   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
13104                                          getPointerTy(DAG.getDataLayout()));
13105 
13106   Type *RetTy = StructType::get(Ty, Ty);
13107 
13108   if (Subtarget->isTargetWindows())
13109     InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);
13110 
13111   TargetLowering::CallLoweringInfo CLI(DAG);
13112   CLI.setDebugLoc(dl).setChain(InChain)
13113     .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
13114     .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
13115 
13116   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
13117   return CallInfo.first;
13118 }
13119 
13120 // Lowers REM using divmod helpers
13121 // see RTABI section 4.2/4.3
13122 SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
13123   // Build return types (div and rem)
13124   std::vector<Type*> RetTyParams;
13125   Type *RetTyElement;
13126 
13127   switch (N->getValueType(0).getSimpleVT().SimpleTy) {
13128   default: llvm_unreachable("Unexpected request for libcall!");
13129   case MVT::i8:   RetTyElement = Type::getInt8Ty(*DAG.getContext());  break;
13130   case MVT::i16:  RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
13131   case MVT::i32:  RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
13132   case MVT::i64:  RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
13133   }
13134 
13135   RetTyParams.push_back(RetTyElement);
13136   RetTyParams.push_back(RetTyElement);
13137   ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
13138   Type *RetTy = StructType::get(*DAG.getContext(), ret);
13139 
13140   RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
13141                                                              SimpleTy);
13142   SDValue InChain = DAG.getEntryNode();
13143   TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
13144                                                     Subtarget);
13145   bool isSigned = N->getOpcode() == ISD::SREM;
13146   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
13147                                          getPointerTy(DAG.getDataLayout()));
13148 
13149   if (Subtarget->isTargetWindows())
13150     InChain = WinDBZCheckDenominator(DAG, N, InChain);
13151 
13152   // Lower call
13153   CallLoweringInfo CLI(DAG);
13154   CLI.setChain(InChain)
13155      .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
13156      .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
13157   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
13158 
13159   // Return second (rem) result operand (first contains div)
13160   SDNode *ResNode = CallResult.first.getNode();
13161   assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
13162   return ResNode->getOperand(1);
13163 }
13164 
13165 SDValue
13166 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
13167   assert(Subtarget->isTargetWindows() && "unsupported target platform");
13168   SDLoc DL(Op);
13169 
13170   // Get the inputs.
13171   SDValue Chain = Op.getOperand(0);
13172   SDValue Size  = Op.getOperand(1);
13173 
13174   SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
13175                               DAG.getConstant(2, DL, MVT::i32));
13176 
13177   SDValue Flag;
13178   Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
13179   Flag = Chain.getValue(1);
13180 
13181   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13182   Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);
13183 
13184   SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
13185   Chain = NewSP.getValue(1);
13186 
13187   SDValue Ops[2] = { NewSP, Chain };
13188   return DAG.getMergeValues(Ops, DL);
13189 }
13190 
13191 SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
13192   assert(Op.getValueType() == MVT::f64 && Subtarget->isFPOnlySP() &&
13193          "Unexpected type for custom-lowering FP_EXTEND");
13194 
13195   RTLIB::Libcall LC;
13196   LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());
13197 
13198   SDValue SrcVal = Op.getOperand(0);
13199   return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
13200                      SDLoc(Op)).first;
13201 }
13202 
13203 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
13204   assert(Op.getOperand(0).getValueType() == MVT::f64 &&
13205          Subtarget->isFPOnlySP() &&
13206          "Unexpected type for custom-lowering FP_ROUND");
13207 
13208   RTLIB::Libcall LC;
13209   LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
13210 
13211   SDValue SrcVal = Op.getOperand(0);
13212   return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
13213                      SDLoc(Op)).first;
13214 }
13215 
13216 bool
13217 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
13218   // The ARM target isn't yet aware of offsets.
13219   return false;
13220 }
13221 
13222 bool ARM::isBitFieldInvertedMask(unsigned v) {
13223   if (v == 0xffffffff)
13224     return false;
13225 
13226   // there can be 1's on either or both "outsides", all the "inside"
13227   // bits must be 0's
13228   return isShiftedMask_32(~v);
13229 }
13230 
13231 /// isFPImmLegal - Returns true if the target can instruction select the
13232 /// specified FP immediate natively. If false, the legalizer will
13233 /// materialize the FP immediate as a load from a constant pool.
13234 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
13235   if (!Subtarget->hasVFP3())
13236     return false;
13237   if (VT == MVT::f32)
13238     return ARM_AM::getFP32Imm(Imm) != -1;
13239   if (VT == MVT::f64 && !Subtarget->isFPOnlySP())
13240     return ARM_AM::getFP64Imm(Imm) != -1;
13241   return false;
13242 }
13243 
13244 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
13245 /// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
13246 /// specified in the intrinsic calls.
13247 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
13248                                            const CallInst &I,
13249                                            unsigned Intrinsic) const {
13250   switch (Intrinsic) {
13251   case Intrinsic::arm_neon_vld1:
13252   case Intrinsic::arm_neon_vld2:
13253   case Intrinsic::arm_neon_vld3:
13254   case Intrinsic::arm_neon_vld4:
13255   case Intrinsic::arm_neon_vld2lane:
13256   case Intrinsic::arm_neon_vld3lane:
13257   case Intrinsic::arm_neon_vld4lane: {
13258     Info.opc = ISD::INTRINSIC_W_CHAIN;
13259     // Conservatively set memVT to the entire set of vectors loaded.
13260     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
13261     uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
13262     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
13263     Info.ptrVal = I.getArgOperand(0);
13264     Info.offset = 0;
13265     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
13266     Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
13267     Info.vol = false; // volatile loads with NEON intrinsics not supported
13268     Info.readMem = true;
13269     Info.writeMem = false;
13270     return true;
13271   }
13272   case Intrinsic::arm_neon_vst1:
13273   case Intrinsic::arm_neon_vst2:
13274   case Intrinsic::arm_neon_vst3:
13275   case Intrinsic::arm_neon_vst4:
13276   case Intrinsic::arm_neon_vst2lane:
13277   case Intrinsic::arm_neon_vst3lane:
13278   case Intrinsic::arm_neon_vst4lane: {
13279     Info.opc = ISD::INTRINSIC_VOID;
13280     // Conservatively set memVT to the entire set of vectors stored.
13281     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
13282     unsigned NumElts = 0;
13283     for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
13284       Type *ArgTy = I.getArgOperand(ArgI)->getType();
13285       if (!ArgTy->isVectorTy())
13286         break;
13287       NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
13288     }
13289     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
13290     Info.ptrVal = I.getArgOperand(0);
13291     Info.offset = 0;
13292     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
13293     Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
13294     Info.vol = false; // volatile stores with NEON intrinsics not supported
13295     Info.readMem = false;
13296     Info.writeMem = true;
13297     return true;
13298   }
13299   case Intrinsic::arm_ldaex:
13300   case Intrinsic::arm_ldrex: {
13301     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
13302     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
13303     Info.opc = ISD::INTRINSIC_W_CHAIN;
13304     Info.memVT = MVT::getVT(PtrTy->getElementType());
13305     Info.ptrVal = I.getArgOperand(0);
13306     Info.offset = 0;
13307     Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
13308     Info.vol = true;
13309     Info.readMem = true;
13310     Info.writeMem = false;
13311     return true;
13312   }
13313   case Intrinsic::arm_stlex:
13314   case Intrinsic::arm_strex: {
13315     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
13316     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
13317     Info.opc = ISD::INTRINSIC_W_CHAIN;
13318     Info.memVT = MVT::getVT(PtrTy->getElementType());
13319     Info.ptrVal = I.getArgOperand(1);
13320     Info.offset = 0;
13321     Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
13322     Info.vol = true;
13323     Info.readMem = false;
13324     Info.writeMem = true;
13325     return true;
13326   }
13327   case Intrinsic::arm_stlexd:
13328   case Intrinsic::arm_strexd:
13329     Info.opc = ISD::INTRINSIC_W_CHAIN;
13330     Info.memVT = MVT::i64;
13331     Info.ptrVal = I.getArgOperand(2);
13332     Info.offset = 0;
13333     Info.align = 8;
13334     Info.vol = true;
13335     Info.readMem = false;
13336     Info.writeMem = true;
13337     return true;
13338 
13339   case Intrinsic::arm_ldaexd:
13340   case Intrinsic::arm_ldrexd:
13341     Info.opc = ISD::INTRINSIC_W_CHAIN;
13342     Info.memVT = MVT::i64;
13343     Info.ptrVal = I.getArgOperand(0);
13344     Info.offset = 0;
13345     Info.align = 8;
13346     Info.vol = true;
13347     Info.readMem = true;
13348     Info.writeMem = false;
13349     return true;
13350 
13351   default:
13352     break;
13353   }
13354 
13355   return false;
13356 }
13357 
13358 /// \brief Returns true if it is beneficial to convert a load of a constant
13359 /// to just the constant itself.
13360 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
13361                                                           Type *Ty) const {
13362   assert(Ty->isIntegerTy());
13363 
13364   unsigned Bits = Ty->getPrimitiveSizeInBits();
13365   if (Bits == 0 || Bits > 32)
13366     return false;
13367   return true;
13368 }
13369 
13370 bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT,
13371                                                 unsigned Index) const {
13372   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
13373     return false;
13374 
13375   return (Index == 0 || Index == ResVT.getVectorNumElements());
13376 }
13377 
13378 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
13379                                         ARM_MB::MemBOpt Domain) const {
13380   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
13381 
13382   // First, if the target has no DMB, see what fallback we can use.
13383   if (!Subtarget->hasDataBarrier()) {
13384     // Some ARMv6 cpus can support data barriers with an mcr instruction.
13385     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
13386     // here.
13387     if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
13388       Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
13389       Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
13390                         Builder.getInt32(0), Builder.getInt32(7),
13391                         Builder.getInt32(10), Builder.getInt32(5)};
13392       return Builder.CreateCall(MCR, args);
13393     } else {
13394       // Instead of using barriers, atomic accesses on these subtargets use
13395       // libcalls.
13396       llvm_unreachable("makeDMB on a target so old that it has no barriers");
13397     }
13398   } else {
13399     Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
13400     // Only a full system barrier exists in the M-class architectures.
13401     Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
13402     Constant *CDomain = Builder.getInt32(Domain);
13403     return Builder.CreateCall(DMB, CDomain);
13404   }
13405 }
13406 
13407 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
13408 Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
13409                                                  Instruction *Inst,
13410                                                  AtomicOrdering Ord) const {
13411   switch (Ord) {
13412   case AtomicOrdering::NotAtomic:
13413   case AtomicOrdering::Unordered:
13414     llvm_unreachable("Invalid fence: unordered/non-atomic");
13415   case AtomicOrdering::Monotonic:
13416   case AtomicOrdering::Acquire:
13417     return nullptr; // Nothing to do
13418   case AtomicOrdering::SequentiallyConsistent:
13419     if (!Inst->hasAtomicStore())
13420       return nullptr; // Nothing to do
13421     /*FALLTHROUGH*/
13422   case AtomicOrdering::Release:
13423   case AtomicOrdering::AcquireRelease:
13424     if (Subtarget->preferISHSTBarriers())
13425       return makeDMB(Builder, ARM_MB::ISHST);
13426     // FIXME: add a comment with a link to documentation justifying this.
13427     else
13428       return makeDMB(Builder, ARM_MB::ISH);
13429   }
13430   llvm_unreachable("Unknown fence ordering in emitLeadingFence");
13431 }
13432 
13433 Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
13434                                                   Instruction *Inst,
13435                                                   AtomicOrdering Ord) const {
13436   switch (Ord) {
13437   case AtomicOrdering::NotAtomic:
13438   case AtomicOrdering::Unordered:
13439     llvm_unreachable("Invalid fence: unordered/not-atomic");
13440   case AtomicOrdering::Monotonic:
13441   case AtomicOrdering::Release:
13442     return nullptr; // Nothing to do
13443   case AtomicOrdering::Acquire:
13444   case AtomicOrdering::AcquireRelease:
13445   case AtomicOrdering::SequentiallyConsistent:
13446     return makeDMB(Builder, ARM_MB::ISH);
13447   }
13448   llvm_unreachable("Unknown fence ordering in emitTrailingFence");
13449 }
13450 
13451 // Loads and stores less than 64-bits are already atomic; ones above that
13452 // are doomed anyway, so defer to the default libcall and blame the OS when
13453 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
13454 // anything for those.
13455 bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
13456   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
13457   return (Size == 64) && !Subtarget->isMClass();
13458 }
13459 
13460 // Loads and stores less than 64-bits are already atomic; ones above that
13461 // are doomed anyway, so defer to the default libcall and blame the OS when
13462 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
13463 // anything for those.
13464 // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
13465 // guarantee, see DDI0406C ARM architecture reference manual,
13466 // sections A8.8.72-74 LDRD)
13467 TargetLowering::AtomicExpansionKind
13468 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
13469   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
13470   return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
13471                                                   : AtomicExpansionKind::None;
13472 }
13473 
13474 // For the real atomic operations, we have ldrex/strex up to 32 bits,
13475 // and up to 64 bits on the non-M profiles
13476 TargetLowering::AtomicExpansionKind
13477 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
13478   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
13479   bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
13480   return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
13481              ? AtomicExpansionKind::LLSC
13482              : AtomicExpansionKind::None;
13483 }
13484 
13485 bool ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(
13486     AtomicCmpXchgInst *AI) const {
13487   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
13488   // implement cmpxchg without spilling. If the address being exchanged is also
13489   // on the stack and close enough to the spill slot, this can lead to a
13490   // situation where the monitor always gets cleared and the atomic operation
13491   // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
13492   bool hasAtomicCmpXchg =
13493       !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
13494   return getTargetMachine().getOptLevel() != 0 && hasAtomicCmpXchg;
13495 }
13496 
13497 bool ARMTargetLowering::shouldInsertFencesForAtomic(
13498     const Instruction *I) const {
13499   return InsertFencesForAtomic;
13500 }
13501 
13502 // This has so far only been implemented for MachO.
13503 bool ARMTargetLowering::useLoadStackGuardNode() const {
13504   return Subtarget->isTargetMachO();
13505 }
13506 
13507 bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
13508                                                   unsigned &Cost) const {
13509   // If we do not have NEON, vector types are not natively supported.
13510   if (!Subtarget->hasNEON())
13511     return false;
13512 
13513   // Floating point values and vector values map to the same register file.
13514   // Therefore, although we could do a store extract of a vector type, this is
13515   // better to leave at float as we have more freedom in the addressing mode for
13516   // those.
13517   if (VectorTy->isFPOrFPVectorTy())
13518     return false;
13519 
13520   // If the index is unknown at compile time, this is very expensive to lower
13521   // and it is not possible to combine the store with the extract.
13522   if (!isa<ConstantInt>(Idx))
13523     return false;
13524 
13525   assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
13526   unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth();
13527   // We can do a store + vector extract on any vector that fits perfectly in a D
13528   // or Q register.
13529   if (BitWidth == 64 || BitWidth == 128) {
13530     Cost = 0;
13531     return true;
13532   }
13533   return false;
13534 }
13535 
13536 bool ARMTargetLowering::isCheapToSpeculateCttz() const {
13537   return Subtarget->hasV6T2Ops();
13538 }
13539 
13540 bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
13541   return Subtarget->hasV6T2Ops();
13542 }
13543 
13544 Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
13545                                          AtomicOrdering Ord) const {
13546   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
13547   Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
13548   bool IsAcquire = isAcquireOrStronger(Ord);
13549 
13550   // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
13551   // intrinsic must return {i32, i32} and we have to recombine them into a
13552   // single i64 here.
13553   if (ValTy->getPrimitiveSizeInBits() == 64) {
13554     Intrinsic::ID Int =
13555         IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
13556     Function *Ldrex = Intrinsic::getDeclaration(M, Int);
13557 
13558     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
13559     Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
13560 
13561     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
13562     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
13563     if (!Subtarget->isLittle())
13564       std::swap (Lo, Hi);
13565     Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
13566     Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
13567     return Builder.CreateOr(
13568         Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
13569   }
13570 
13571   Type *Tys[] = { Addr->getType() };
13572   Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
13573   Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);
13574 
13575   return Builder.CreateTruncOrBitCast(
13576       Builder.CreateCall(Ldrex, Addr),
13577       cast<PointerType>(Addr->getType())->getElementType());
13578 }
13579 
13580 void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
13581     IRBuilder<> &Builder) const {
13582   if (!Subtarget->hasV7Ops())
13583     return;
13584   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
13585   Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
13586 }
13587 
13588 Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
13589                                                Value *Addr,
13590                                                AtomicOrdering Ord) const {
13591   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
13592   bool IsRelease = isReleaseOrStronger(Ord);
13593 
13594   // Since the intrinsics must have legal type, the i64 intrinsics take two
13595   // parameters: "i32, i32". We must marshal Val into the appropriate form
13596   // before the call.
13597   if (Val->getType()->getPrimitiveSizeInBits() == 64) {
13598     Intrinsic::ID Int =
13599         IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
13600     Function *Strex = Intrinsic::getDeclaration(M, Int);
13601     Type *Int32Ty = Type::getInt32Ty(M->getContext());
13602 
13603     Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
13604     Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
13605     if (!Subtarget->isLittle())
13606       std::swap (Lo, Hi);
13607     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
13608     return Builder.CreateCall(Strex, {Lo, Hi, Addr});
13609   }
13610 
13611   Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
13612   Type *Tys[] = { Addr->getType() };
13613   Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
13614 
13615   return Builder.CreateCall(
13616       Strex, {Builder.CreateZExtOrBitCast(
13617                   Val, Strex->getFunctionType()->getParamType(0)),
13618               Addr});
13619 }
13620 
13621 /// A helper function for determining the number of interleaved accesses we
13622 /// will generate when lowering accesses of the given type.
13623 unsigned
13624 ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
13625                                              const DataLayout &DL) const {
13626   return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
13627 }
13628 
13629 bool ARMTargetLowering::isLegalInterleavedAccessType(
13630     VectorType *VecTy, const DataLayout &DL) const {
13631 
13632   unsigned VecSize = DL.getTypeSizeInBits(VecTy);
13633   unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
13634 
13635   // Ensure the vector doesn't have f16 elements. Even though we could do an
13636   // i16 vldN, we can't hold the f16 vectors and will end up converting via
13637   // f32.
13638   if (VecTy->getElementType()->isHalfTy())
13639     return false;
13640 
13641   // Ensure the number of vector elements is greater than 1.
13642   if (VecTy->getNumElements() < 2)
13643     return false;
13644 
13645   // Ensure the element type is legal.
13646   if (ElSize != 8 && ElSize != 16 && ElSize != 32)
13647     return false;
13648 
13649   // Ensure the total vector size is 64 or a multiple of 128. Types larger than
13650   // 128 will be split into multiple interleaved accesses.
13651   return VecSize == 64 || VecSize % 128 == 0;
13652 }
13653 
13654 /// \brief Lower an interleaved load into a vldN intrinsic.
13655 ///
13656 /// E.g. Lower an interleaved load (Factor = 2):
13657 ///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
13658 ///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
13659 ///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
13660 ///
13661 ///      Into:
13662 ///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
13663 ///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
13664 ///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
13665 bool ARMTargetLowering::lowerInterleavedLoad(
13666     LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
13667     ArrayRef<unsigned> Indices, unsigned Factor) const {
13668   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
13669          "Invalid interleave factor");
13670   assert(!Shuffles.empty() && "Empty shufflevector input");
13671   assert(Shuffles.size() == Indices.size() &&
13672          "Unmatched number of shufflevectors and indices");
13673 
13674   VectorType *VecTy = Shuffles[0]->getType();
13675   Type *EltTy = VecTy->getVectorElementType();
13676 
13677   const DataLayout &DL = LI->getModule()->getDataLayout();
13678 
13679   // Skip if we do not have NEON and skip illegal vector types. We can
13680   // "legalize" wide vector types into multiple interleaved accesses as long as
13681   // the vector types are divisible by 128.
13682   if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(VecTy, DL))
13683     return false;
13684 
13685   unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);
13686 
13687   // A pointer vector can not be the return type of the ldN intrinsics. Need to
13688   // load integer vectors first and then convert to pointer vectors.
13689   if (EltTy->isPointerTy())
13690     VecTy =
13691         VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());
13692 
13693   IRBuilder<> Builder(LI);
13694 
13695   // The base address of the load.
13696   Value *BaseAddr = LI->getPointerOperand();
13697 
13698   if (NumLoads > 1) {
13699     // If we're going to generate more than one load, reset the sub-vector type
13700     // to something legal.
13701     VecTy = VectorType::get(VecTy->getVectorElementType(),
13702                             VecTy->getVectorNumElements() / NumLoads);
13703 
13704     // We will compute the pointer operand of each load from the original base
13705     // address using GEPs. Cast the base address to a pointer to the scalar
13706     // element type.
13707     BaseAddr = Builder.CreateBitCast(
13708         BaseAddr, VecTy->getVectorElementType()->getPointerTo(
13709                       LI->getPointerAddressSpace()));
13710   }
13711 
13712   assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");
13713 
13714   Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
13715   Type *Tys[] = {VecTy, Int8Ptr};
13716   static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
13717                                             Intrinsic::arm_neon_vld3,
13718                                             Intrinsic::arm_neon_vld4};
13719   Function *VldnFunc =
13720       Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
13721 
13722   // Holds sub-vectors extracted from the load intrinsic return values. The
13723   // sub-vectors are associated with the shufflevector instructions they will
13724   // replace.
13725   DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
13726 
13727   for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
13728 
13729     // If we're generating more than one load, compute the base address of
13730     // subsequent loads as an offset from the previous.
13731     if (LoadCount > 0)
13732       BaseAddr = Builder.CreateConstGEP1_32(
13733           BaseAddr, VecTy->getVectorNumElements() * Factor);
13734 
13735     SmallVector<Value *, 2> Ops;
13736     Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
13737     Ops.push_back(Builder.getInt32(LI->getAlignment()));
13738 
13739     CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN");
13740 
13741     // Replace uses of each shufflevector with the corresponding vector loaded
13742     // by ldN.
13743     for (unsigned i = 0; i < Shuffles.size(); i++) {
13744       ShuffleVectorInst *SV = Shuffles[i];
13745       unsigned Index = Indices[i];
13746 
13747       Value *SubVec = Builder.CreateExtractValue(VldN, Index);
13748 
13749       // Convert the integer vector to pointer vector if the element is pointer.
13750       if (EltTy->isPointerTy())
13751         SubVec = Builder.CreateIntToPtr(SubVec, SV->getType());
13752 
13753       SubVecs[SV].push_back(SubVec);
13754     }
13755   }
13756 
13757   // Replace uses of the shufflevector instructions with the sub-vectors
13758   // returned by the load intrinsic. If a shufflevector instruction is
13759   // associated with more than one sub-vector, those sub-vectors will be
13760   // concatenated into a single wide vector.
13761   for (ShuffleVectorInst *SVI : Shuffles) {
13762     auto &SubVec = SubVecs[SVI];
13763     auto *WideVec =
13764         SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
13765     SVI->replaceAllUsesWith(WideVec);
13766   }
13767 
13768   return true;
13769 }
13770 
13771 /// \brief Lower an interleaved store into a vstN intrinsic.
13772 ///
13773 /// E.g. Lower an interleaved store (Factor = 3):
13774 ///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
13775 ///                                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
13776 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
13777 ///
13778 ///      Into:
13779 ///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
13780 ///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
13781 ///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
13782 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
13783 ///
13784 /// Note that the new shufflevectors will be removed and we'll only generate one
13785 /// vst3 instruction in CodeGen.
13786 ///
13787 /// Example for a more general valid mask (Factor 3). Lower:
13788 ///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
13789 ///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
13790 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
13791 ///
13792 ///      Into:
13793 ///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
13794 ///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
13795 ///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
13796 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
13797 bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
13798                                               ShuffleVectorInst *SVI,
13799                                               unsigned Factor) const {
13800   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
13801          "Invalid interleave factor");
13802 
13803   VectorType *VecTy = SVI->getType();
13804   assert(VecTy->getVectorNumElements() % Factor == 0 &&
13805          "Invalid interleaved store");
13806 
13807   unsigned LaneLen = VecTy->getVectorNumElements() / Factor;
13808   Type *EltTy = VecTy->getVectorElementType();
13809   VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);
13810 
13811   const DataLayout &DL = SI->getModule()->getDataLayout();
13812 
13813   // Skip if we do not have NEON and skip illegal vector types. We can
13814   // "legalize" wide vector types into multiple interleaved accesses as long as
13815   // the vector types are divisible by 128.
13816   if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(SubVecTy, DL))
13817     return false;
13818 
13819   unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);
13820 
13821   Value *Op0 = SVI->getOperand(0);
13822   Value *Op1 = SVI->getOperand(1);
13823   IRBuilder<> Builder(SI);
13824 
13825   // StN intrinsics don't support pointer vectors as arguments. Convert pointer
13826   // vectors to integer vectors.
13827   if (EltTy->isPointerTy()) {
13828     Type *IntTy = DL.getIntPtrType(EltTy);
13829 
13830     // Convert to the corresponding integer vector.
13831     Type *IntVecTy =
13832         VectorType::get(IntTy, Op0->getType()->getVectorNumElements());
13833     Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
13834     Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
13835 
13836     SubVecTy = VectorType::get(IntTy, LaneLen);
13837   }
13838 
13839   // The base address of the store.
13840   Value *BaseAddr = SI->getPointerOperand();
13841 
13842   if (NumStores > 1) {
13843     // If we're going to generate more than one store, reset the lane length
13844     // and sub-vector type to something legal.
13845     LaneLen /= NumStores;
13846     SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen);
13847 
13848     // We will compute the pointer operand of each store from the original base
13849     // address using GEPs. Cast the base address to a pointer to the scalar
13850     // element type.
13851     BaseAddr = Builder.CreateBitCast(
13852         BaseAddr, SubVecTy->getVectorElementType()->getPointerTo(
13853                       SI->getPointerAddressSpace()));
13854   }
13855 
13856   assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");
13857 
13858   auto Mask = SVI->getShuffleMask();
13859 
13860   Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
13861   Type *Tys[] = {Int8Ptr, SubVecTy};
13862   static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
13863                                              Intrinsic::arm_neon_vst3,
13864                                              Intrinsic::arm_neon_vst4};
13865 
13866   for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
13867 
13868     // If we generating more than one store, we compute the base address of
13869     // subsequent stores as an offset from the previous.
13870     if (StoreCount > 0)
13871       BaseAddr = Builder.CreateConstGEP1_32(BaseAddr, LaneLen * Factor);
13872 
13873     SmallVector<Value *, 6> Ops;
13874     Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
13875 
13876     Function *VstNFunc =
13877         Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys);
13878 
13879     // Split the shufflevector operands into sub vectors for the new vstN call.
13880     for (unsigned i = 0; i < Factor; i++) {
13881       unsigned IdxI = StoreCount * LaneLen * Factor + i;
13882       if (Mask[IdxI] >= 0) {
13883         Ops.push_back(Builder.CreateShuffleVector(
13884             Op0, Op1, createSequentialMask(Builder, Mask[IdxI], LaneLen, 0)));
13885       } else {
13886         unsigned StartMask = 0;
13887         for (unsigned j = 1; j < LaneLen; j++) {
13888           unsigned IdxJ = StoreCount * LaneLen * Factor + j;
13889           if (Mask[IdxJ * Factor + IdxI] >= 0) {
13890             StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
13891             break;
13892           }
13893         }
13894         // Note: If all elements in a chunk are undefs, StartMask=0!
13895         // Note: Filling undef gaps with random elements is ok, since
13896         // those elements were being written anyway (with undefs).
13897         // In the case of all undefs we're defaulting to using elems from 0
13898         // Note: StartMask cannot be negative, it's checked in
13899         // isReInterleaveMask
13900         Ops.push_back(Builder.CreateShuffleVector(
13901             Op0, Op1, createSequentialMask(Builder, StartMask, LaneLen, 0)));
13902       }
13903     }
13904 
13905     Ops.push_back(Builder.getInt32(SI->getAlignment()));
13906     Builder.CreateCall(VstNFunc, Ops);
13907   }
13908   return true;
13909 }
13910 
13911 enum HABaseType {
13912   HA_UNKNOWN = 0,
13913   HA_FLOAT,
13914   HA_DOUBLE,
13915   HA_VECT64,
13916   HA_VECT128
13917 };
13918 
13919 static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
13920                                    uint64_t &Members) {
13921   if (auto *ST = dyn_cast<StructType>(Ty)) {
13922     for (unsigned i = 0; i < ST->getNumElements(); ++i) {
13923       uint64_t SubMembers = 0;
13924       if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
13925         return false;
13926       Members += SubMembers;
13927     }
13928   } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
13929     uint64_t SubMembers = 0;
13930     if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
13931       return false;
13932     Members += SubMembers * AT->getNumElements();
13933   } else if (Ty->isFloatTy()) {
13934     if (Base != HA_UNKNOWN && Base != HA_FLOAT)
13935       return false;
13936     Members = 1;
13937     Base = HA_FLOAT;
13938   } else if (Ty->isDoubleTy()) {
13939     if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
13940       return false;
13941     Members = 1;
13942     Base = HA_DOUBLE;
13943   } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
13944     Members = 1;
13945     switch (Base) {
13946     case HA_FLOAT:
13947     case HA_DOUBLE:
13948       return false;
13949     case HA_VECT64:
13950       return VT->getBitWidth() == 64;
13951     case HA_VECT128:
13952       return VT->getBitWidth() == 128;
13953     case HA_UNKNOWN:
13954       switch (VT->getBitWidth()) {
13955       case 64:
13956         Base = HA_VECT64;
13957         return true;
13958       case 128:
13959         Base = HA_VECT128;
13960         return true;
13961       default:
13962         return false;
13963       }
13964     }
13965   }
13966 
13967   return (Members > 0 && Members <= 4);
13968 }
13969 
13970 /// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
13971 /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
13972 /// passing according to AAPCS rules.
13973 bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
13974     Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
13975   if (getEffectiveCallingConv(CallConv, isVarArg) !=
13976       CallingConv::ARM_AAPCS_VFP)
13977     return false;
13978 
13979   HABaseType Base = HA_UNKNOWN;
13980   uint64_t Members = 0;
13981   bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
13982   DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
13983 
13984   bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
13985   return IsHA || IsIntArray;
13986 }
13987 
13988 unsigned ARMTargetLowering::getExceptionPointerRegister(
13989     const Constant *PersonalityFn) const {
13990   // Platforms which do not use SjLj EH may return values in these registers
13991   // via the personality function.
13992   return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0;
13993 }
13994 
13995 unsigned ARMTargetLowering::getExceptionSelectorRegister(
13996     const Constant *PersonalityFn) const {
13997   // Platforms which do not use SjLj EH may return values in these registers
13998   // via the personality function.
13999   return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1;
14000 }
14001 
14002 void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
14003   // Update IsSplitCSR in ARMFunctionInfo.
14004   ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
14005   AFI->setIsSplitCSR(true);
14006 }
14007 
14008 void ARMTargetLowering::insertCopiesSplitCSR(
14009     MachineBasicBlock *Entry,
14010     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
14011   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
14012   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
14013   if (!IStart)
14014     return;
14015 
14016   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
14017   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
14018   MachineBasicBlock::iterator MBBI = Entry->begin();
14019   for (const MCPhysReg *I = IStart; *I; ++I) {
14020     const TargetRegisterClass *RC = nullptr;
14021     if (ARM::GPRRegClass.contains(*I))
14022       RC = &ARM::GPRRegClass;
14023     else if (ARM::DPRRegClass.contains(*I))
14024       RC = &ARM::DPRRegClass;
14025     else
14026       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
14027 
14028     unsigned NewVR = MRI->createVirtualRegister(RC);
14029     // Create copy from CSR to a virtual register.
14030     // FIXME: this currently does not emit CFI pseudo-instructions, it works
14031     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
14032     // nounwind. If we want to generalize this later, we may need to emit
14033     // CFI pseudo-instructions.
14034     assert(Entry->getParent()->getFunction()->hasFnAttribute(
14035                Attribute::NoUnwind) &&
14036            "Function should be nounwind in insertCopiesSplitCSR!");
14037     Entry->addLiveIn(*I);
14038     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
14039         .addReg(*I);
14040 
14041     // Insert the copy-back instructions right before the terminator.
14042     for (auto *Exit : Exits)
14043       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
14044               TII->get(TargetOpcode::COPY), *I)
14045           .addReg(NewVR);
14046   }
14047 }
14048 
14049 void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
14050   MF.getFrameInfo().computeMaxCallFrameSize(MF);
14051   TargetLoweringBase::finalizeLowering(MF);
14052 }
14053