1 //===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the interfaces that ARM uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "ARMISelLowering.h"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMBaseRegisterInfo.h"
18 #include "ARMCallingConv.h"
19 #include "ARMConstantPoolValue.h"
20 #include "ARMMachineFunctionInfo.h"
21 #include "ARMPerfectShuffle.h"
22 #include "ARMRegisterInfo.h"
23 #include "ARMSelectionDAGInfo.h"
24 #include "ARMSubtarget.h"
25 #include "MCTargetDesc/ARMAddressingModes.h"
26 #include "MCTargetDesc/ARMBaseInfo.h"
27 #include "llvm/ADT/APFloat.h"
28 #include "llvm/ADT/APInt.h"
29 #include "llvm/ADT/ArrayRef.h"
30 #include "llvm/ADT/BitVector.h"
31 #include "llvm/ADT/DenseMap.h"
32 #include "llvm/ADT/STLExtras.h"
33 #include "llvm/ADT/SmallPtrSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringExtras.h"
37 #include "llvm/ADT/StringRef.h"
38 #include "llvm/ADT/StringSwitch.h"
39 #include "llvm/ADT/Triple.h"
40 #include "llvm/ADT/Twine.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/CodeGen/CallingConvLower.h"
43 #include "llvm/CodeGen/ISDOpcodes.h"
44 #include "llvm/CodeGen/IntrinsicLowering.h"
45 #include "llvm/CodeGen/MachineBasicBlock.h"
46 #include "llvm/CodeGen/MachineConstantPool.h"
47 #include "llvm/CodeGen/MachineFrameInfo.h"
48 #include "llvm/CodeGen/MachineFunction.h"
49 #include "llvm/CodeGen/MachineInstr.h"
50 #include "llvm/CodeGen/MachineInstrBuilder.h"
51 #include "llvm/CodeGen/MachineJumpTableInfo.h"
52 #include "llvm/CodeGen/MachineMemOperand.h"
53 #include "llvm/CodeGen/MachineOperand.h"
54 #include "llvm/CodeGen/MachineRegisterInfo.h"
55 #include "llvm/CodeGen/MachineValueType.h"
56 #include "llvm/CodeGen/RuntimeLibcalls.h"
57 #include "llvm/CodeGen/SelectionDAG.h"
58 #include "llvm/CodeGen/SelectionDAGNodes.h"
59 #include "llvm/CodeGen/ValueTypes.h"
60 #include "llvm/IR/Attributes.h"
61 #include "llvm/IR/CallingConv.h"
62 #include "llvm/IR/Constant.h"
63 #include "llvm/IR/Constants.h"
64 #include "llvm/IR/DataLayout.h"
65 #include "llvm/IR/DebugLoc.h"
66 #include "llvm/IR/DerivedTypes.h"
67 #include "llvm/IR/Function.h"
68 #include "llvm/IR/GlobalAlias.h"
69 #include "llvm/IR/GlobalValue.h"
70 #include "llvm/IR/GlobalVariable.h"
71 #include "llvm/IR/IRBuilder.h"
72 #include "llvm/IR/InlineAsm.h"
73 #include "llvm/IR/Instruction.h"
74 #include "llvm/IR/Instructions.h"
75 #include "llvm/IR/IntrinsicInst.h"
76 #include "llvm/IR/Intrinsics.h"
77 #include "llvm/IR/Module.h"
78 #include "llvm/IR/Type.h"
79 #include "llvm/IR/User.h"
80 #include "llvm/IR/Value.h"
81 #include "llvm/MC/MCInstrDesc.h"
82 #include "llvm/MC/MCInstrItineraries.h"
83 #include "llvm/MC/MCRegisterInfo.h"
84 #include "llvm/MC/MCSchedule.h"
85 #include "llvm/Support/AtomicOrdering.h"
86 #include "llvm/Support/BranchProbability.h"
87 #include "llvm/Support/Casting.h"
88 #include "llvm/Support/CodeGen.h"
89 #include "llvm/Support/CommandLine.h"
90 #include "llvm/Support/Compiler.h"
91 #include "llvm/Support/Debug.h"
92 #include "llvm/Support/ErrorHandling.h"
93 #include "llvm/Support/KnownBits.h"
94 #include "llvm/Support/MathExtras.h"
95 #include "llvm/Support/raw_ostream.h"
96 #include "llvm/Target/TargetInstrInfo.h"
97 #include "llvm/Target/TargetMachine.h"
98 #include "llvm/Target/TargetOptions.h"
99 #include <algorithm>
100 #include <cassert>
101 #include <cstdint>
102 #include <cstdlib>
103 #include <iterator>
104 #include <limits>
105 #include <string>
106 #include <tuple>
107 #include <utility>
108 #include <vector>
109 
110 using namespace llvm;
111 
112 #define DEBUG_TYPE "arm-isel"
113 
114 STATISTIC(NumTailCalls, "Number of tail calls");
115 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
116 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
117 STATISTIC(NumConstpoolPromoted,
118   "Number of constants with their storage promoted into constant pools");
119 
120 static cl::opt<bool>
121 ARMInterworking("arm-interworking", cl::Hidden,
122   cl::desc("Enable / disable ARM interworking (for debugging only)"),
123   cl::init(true));
124 
125 static cl::opt<bool> EnableConstpoolPromotion(
126     "arm-promote-constant", cl::Hidden,
127     cl::desc("Enable / disable promotion of unnamed_addr constants into "
128              "constant pools"),
129     cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
130 static cl::opt<unsigned> ConstpoolPromotionMaxSize(
131     "arm-promote-constant-max-size", cl::Hidden,
132     cl::desc("Maximum size of constant to promote into a constant pool"),
133     cl::init(64));
134 static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
135     "arm-promote-constant-max-total", cl::Hidden,
136     cl::desc("Maximum size of ALL constants to promote into a constant pool"),
137     cl::init(128));
138 
139 // The APCS parameter registers.
140 static const MCPhysReg GPRArgRegs[] = {
141   ARM::R0, ARM::R1, ARM::R2, ARM::R3
142 };
143 
144 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
145                                        MVT PromotedBitwiseVT) {
146   if (VT != PromotedLdStVT) {
147     setOperationAction(ISD::LOAD, VT, Promote);
148     AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
149 
150     setOperationAction(ISD::STORE, VT, Promote);
151     AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
152   }
153 
154   MVT ElemTy = VT.getVectorElementType();
155   if (ElemTy != MVT::f64)
156     setOperationAction(ISD::SETCC, VT, Custom);
157   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
158   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
159   if (ElemTy == MVT::i32) {
160     setOperationAction(ISD::SINT_TO_FP, VT, Custom);
161     setOperationAction(ISD::UINT_TO_FP, VT, Custom);
162     setOperationAction(ISD::FP_TO_SINT, VT, Custom);
163     setOperationAction(ISD::FP_TO_UINT, VT, Custom);
164   } else {
165     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
166     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
167     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
168     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
169   }
170   setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
171   setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
172   setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
173   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
174   setOperationAction(ISD::SELECT,            VT, Expand);
175   setOperationAction(ISD::SELECT_CC,         VT, Expand);
176   setOperationAction(ISD::VSELECT,           VT, Expand);
177   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
178   if (VT.isInteger()) {
179     setOperationAction(ISD::SHL, VT, Custom);
180     setOperationAction(ISD::SRA, VT, Custom);
181     setOperationAction(ISD::SRL, VT, Custom);
182   }
183 
184   // Promote all bit-wise operations.
185   if (VT.isInteger() && VT != PromotedBitwiseVT) {
186     setOperationAction(ISD::AND, VT, Promote);
187     AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
188     setOperationAction(ISD::OR,  VT, Promote);
189     AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
190     setOperationAction(ISD::XOR, VT, Promote);
191     AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
192   }
193 
194   // Neon does not support vector divide/remainder operations.
195   setOperationAction(ISD::SDIV, VT, Expand);
196   setOperationAction(ISD::UDIV, VT, Expand);
197   setOperationAction(ISD::FDIV, VT, Expand);
198   setOperationAction(ISD::SREM, VT, Expand);
199   setOperationAction(ISD::UREM, VT, Expand);
200   setOperationAction(ISD::FREM, VT, Expand);
201 
202   if (!VT.isFloatingPoint() &&
203       VT != MVT::v2i64 && VT != MVT::v1i64)
204     for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
205       setOperationAction(Opcode, VT, Legal);
206 }
207 
208 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
209   addRegisterClass(VT, &ARM::DPRRegClass);
210   addTypeForNEON(VT, MVT::f64, MVT::v2i32);
211 }
212 
213 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
214   addRegisterClass(VT, &ARM::DPairRegClass);
215   addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
216 }
217 
218 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
219                                      const ARMSubtarget &STI)
220     : TargetLowering(TM), Subtarget(&STI) {
221   RegInfo = Subtarget->getRegisterInfo();
222   Itins = Subtarget->getInstrItineraryData();
223 
224   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
225 
226   if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
227       !Subtarget->isTargetWatchOS()) {
228     const auto &E = Subtarget->getTargetTriple().getEnvironment();
229 
230     bool IsHFTarget = E == Triple::EABIHF || E == Triple::GNUEABIHF ||
231                       E == Triple::MuslEABIHF;
232     // Windows is a special case.  Technically, we will replace all of the "GNU"
233     // calls with calls to MSVCRT if appropriate and adjust the calling
234     // convention then.
235     IsHFTarget = IsHFTarget || Subtarget->isTargetWindows();
236 
237     for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
238       setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
239                             IsHFTarget ? CallingConv::ARM_AAPCS_VFP
240                                        : CallingConv::ARM_AAPCS);
241   }
242 
243   if (Subtarget->isTargetMachO()) {
244     // Uses VFP for Thumb libfuncs if available.
245     if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
246         Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
247       static const struct {
248         const RTLIB::Libcall Op;
249         const char * const Name;
250         const ISD::CondCode Cond;
251       } LibraryCalls[] = {
252         // Single-precision floating-point arithmetic.
253         { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
254         { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
255         { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
256         { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },
257 
258         // Double-precision floating-point arithmetic.
259         { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
260         { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
261         { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
262         { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },
263 
264         // Single-precision comparisons.
265         { RTLIB::OEQ_F32, "__eqsf2vfp",    ISD::SETNE },
266         { RTLIB::UNE_F32, "__nesf2vfp",    ISD::SETNE },
267         { RTLIB::OLT_F32, "__ltsf2vfp",    ISD::SETNE },
268         { RTLIB::OLE_F32, "__lesf2vfp",    ISD::SETNE },
269         { RTLIB::OGE_F32, "__gesf2vfp",    ISD::SETNE },
270         { RTLIB::OGT_F32, "__gtsf2vfp",    ISD::SETNE },
271         { RTLIB::UO_F32,  "__unordsf2vfp", ISD::SETNE },
272         { RTLIB::O_F32,   "__unordsf2vfp", ISD::SETEQ },
273 
274         // Double-precision comparisons.
275         { RTLIB::OEQ_F64, "__eqdf2vfp",    ISD::SETNE },
276         { RTLIB::UNE_F64, "__nedf2vfp",    ISD::SETNE },
277         { RTLIB::OLT_F64, "__ltdf2vfp",    ISD::SETNE },
278         { RTLIB::OLE_F64, "__ledf2vfp",    ISD::SETNE },
279         { RTLIB::OGE_F64, "__gedf2vfp",    ISD::SETNE },
280         { RTLIB::OGT_F64, "__gtdf2vfp",    ISD::SETNE },
281         { RTLIB::UO_F64,  "__unorddf2vfp", ISD::SETNE },
282         { RTLIB::O_F64,   "__unorddf2vfp", ISD::SETEQ },
283 
284         // Floating-point to integer conversions.
285         // i64 conversions are done via library routines even when generating VFP
286         // instructions, so use the same ones.
287         { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp",    ISD::SETCC_INVALID },
288         { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
289         { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp",    ISD::SETCC_INVALID },
290         { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },
291 
292         // Conversions between floating types.
293         { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp",  ISD::SETCC_INVALID },
294         { RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp", ISD::SETCC_INVALID },
295 
296         // Integer to floating-point conversions.
297         // i64 conversions are done via library routines even when generating VFP
298         // instructions, so use the same ones.
299         // FIXME: There appears to be some naming inconsistency in ARM libgcc:
300         // e.g., __floatunsidf vs. __floatunssidfvfp.
301         { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp",    ISD::SETCC_INVALID },
302         { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
303         { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp",    ISD::SETCC_INVALID },
304         { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
305       };
306 
307       for (const auto &LC : LibraryCalls) {
308         setLibcallName(LC.Op, LC.Name);
309         if (LC.Cond != ISD::SETCC_INVALID)
310           setCmpLibcallCC(LC.Op, LC.Cond);
311       }
312     }
313 
314     // Set the correct calling convention for ARMv7k WatchOS. It's just
315     // AAPCS_VFP for functions as simple as libcalls.
316     if (Subtarget->isTargetWatchABI()) {
317       for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i)
318         setLibcallCallingConv((RTLIB::Libcall)i, CallingConv::ARM_AAPCS_VFP);
319     }
320   }
321 
322   // These libcalls are not available in 32-bit.
323   setLibcallName(RTLIB::SHL_I128, nullptr);
324   setLibcallName(RTLIB::SRL_I128, nullptr);
325   setLibcallName(RTLIB::SRA_I128, nullptr);
326 
327   // RTLIB
328   if (Subtarget->isAAPCS_ABI() &&
329       (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
330        Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
331     static const struct {
332       const RTLIB::Libcall Op;
333       const char * const Name;
334       const CallingConv::ID CC;
335       const ISD::CondCode Cond;
336     } LibraryCalls[] = {
337       // Double-precision floating-point arithmetic helper functions
338       // RTABI chapter 4.1.2, Table 2
339       { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
340       { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
341       { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
342       { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
343 
344       // Double-precision floating-point comparison helper functions
345       // RTABI chapter 4.1.2, Table 3
346       { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
347       { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
348       { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
349       { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
350       { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
351       { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
352       { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
353       { RTLIB::O_F64,   "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
354 
355       // Single-precision floating-point arithmetic helper functions
356       // RTABI chapter 4.1.2, Table 4
357       { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
358       { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
359       { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
360       { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
361 
362       // Single-precision floating-point comparison helper functions
363       // RTABI chapter 4.1.2, Table 5
364       { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
365       { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
366       { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
367       { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
368       { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
369       { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
370       { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
371       { RTLIB::O_F32,   "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
372 
373       // Floating-point to integer conversions.
374       // RTABI chapter 4.1.2, Table 6
375       { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
376       { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
377       { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
378       { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
379       { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
380       { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
381       { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
382       { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
383 
384       // Conversions between floating types.
385       // RTABI chapter 4.1.2, Table 7
386       { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
387       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
388       { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
389 
390       // Integer to floating-point conversions.
391       // RTABI chapter 4.1.2, Table 8
392       { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
393       { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
394       { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
395       { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
396       { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
397       { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
398       { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
399       { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
400 
401       // Long long helper functions
402       // RTABI chapter 4.2, Table 9
403       { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
404       { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
405       { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
406       { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
407 
408       // Integer division functions
409       // RTABI chapter 4.3.1
410       { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
411       { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
412       { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
413       { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
414       { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
415       { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
416       { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
417       { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
418     };
419 
420     for (const auto &LC : LibraryCalls) {
421       setLibcallName(LC.Op, LC.Name);
422       setLibcallCallingConv(LC.Op, LC.CC);
423       if (LC.Cond != ISD::SETCC_INVALID)
424         setCmpLibcallCC(LC.Op, LC.Cond);
425     }
426 
427     // EABI dependent RTLIB
428     if (TM.Options.EABIVersion == EABI::EABI4 ||
429         TM.Options.EABIVersion == EABI::EABI5) {
430       static const struct {
431         const RTLIB::Libcall Op;
432         const char *const Name;
433         const CallingConv::ID CC;
434         const ISD::CondCode Cond;
435       } MemOpsLibraryCalls[] = {
436         // Memory operations
437         // RTABI chapter 4.3.4
438         { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
439         { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
440         { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
441       };
442 
443       for (const auto &LC : MemOpsLibraryCalls) {
444         setLibcallName(LC.Op, LC.Name);
445         setLibcallCallingConv(LC.Op, LC.CC);
446         if (LC.Cond != ISD::SETCC_INVALID)
447           setCmpLibcallCC(LC.Op, LC.Cond);
448       }
449     }
450   }
451 
452   if (Subtarget->isTargetWindows()) {
453     static const struct {
454       const RTLIB::Libcall Op;
455       const char * const Name;
456       const CallingConv::ID CC;
457     } LibraryCalls[] = {
458       { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
459       { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
460       { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
461       { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
462       { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
463       { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
464       { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
465       { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
466     };
467 
468     for (const auto &LC : LibraryCalls) {
469       setLibcallName(LC.Op, LC.Name);
470       setLibcallCallingConv(LC.Op, LC.CC);
471     }
472   }
473 
474   // Use divmod compiler-rt calls for iOS 5.0 and later.
475   if (Subtarget->isTargetMachO() &&
476       !(Subtarget->isTargetIOS() &&
477         Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
478     setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
479     setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
480   }
481 
482   // The half <-> float conversion functions are always soft-float on
483   // non-watchos platforms, but are needed for some targets which use a
484   // hard-float calling convention by default.
485   if (!Subtarget->isTargetWatchABI()) {
486     if (Subtarget->isAAPCS_ABI()) {
487       setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
488       setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
489       setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
490     } else {
491       setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
492       setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
493       setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
494     }
495   }
496 
497   // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
498   // a __gnu_ prefix (which is the default).
499   if (Subtarget->isTargetAEABI()) {
500     static const struct {
501       const RTLIB::Libcall Op;
502       const char * const Name;
503       const CallingConv::ID CC;
504     } LibraryCalls[] = {
505       { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
506       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
507       { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
508     };
509 
510     for (const auto &LC : LibraryCalls) {
511       setLibcallName(LC.Op, LC.Name);
512       setLibcallCallingConv(LC.Op, LC.CC);
513     }
514   }
515 
516   if (Subtarget->isThumb1Only())
517     addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
518   else
519     addRegisterClass(MVT::i32, &ARM::GPRRegClass);
520 
521   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
522       !Subtarget->isThumb1Only()) {
523     addRegisterClass(MVT::f32, &ARM::SPRRegClass);
524     addRegisterClass(MVT::f64, &ARM::DPRRegClass);
525   }
526 
527   for (MVT VT : MVT::vector_valuetypes()) {
528     for (MVT InnerVT : MVT::vector_valuetypes()) {
529       setTruncStoreAction(VT, InnerVT, Expand);
530       setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
531       setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
532       setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
533     }
534 
535     setOperationAction(ISD::MULHS, VT, Expand);
536     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
537     setOperationAction(ISD::MULHU, VT, Expand);
538     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
539 
540     setOperationAction(ISD::BSWAP, VT, Expand);
541   }
542 
543   setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
544   setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
545 
546   setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
547   setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);
548 
549   if (Subtarget->hasNEON()) {
550     addDRTypeForNEON(MVT::v2f32);
551     addDRTypeForNEON(MVT::v8i8);
552     addDRTypeForNEON(MVT::v4i16);
553     addDRTypeForNEON(MVT::v2i32);
554     addDRTypeForNEON(MVT::v1i64);
555 
556     addQRTypeForNEON(MVT::v4f32);
557     addQRTypeForNEON(MVT::v2f64);
558     addQRTypeForNEON(MVT::v16i8);
559     addQRTypeForNEON(MVT::v8i16);
560     addQRTypeForNEON(MVT::v4i32);
561     addQRTypeForNEON(MVT::v2i64);
562 
563     // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
564     // neither Neon nor VFP support any arithmetic operations on it.
565     // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively
566     // supported for v4f32.
567     setOperationAction(ISD::FADD, MVT::v2f64, Expand);
568     setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
569     setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
570     // FIXME: Code duplication: FDIV and FREM are expanded always, see
571     // ARMTargetLowering::addTypeForNEON method for details.
572     setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
573     setOperationAction(ISD::FREM, MVT::v2f64, Expand);
574     // FIXME: Create unittest.
575     // In another words, find a way when "copysign" appears in DAG with vector
576     // operands.
577     setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
578     // FIXME: Code duplication: SETCC has custom operation action, see
579     // ARMTargetLowering::addTypeForNEON method for details.
580     setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
581     // FIXME: Create unittest for FNEG and for FABS.
582     setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
583     setOperationAction(ISD::FABS, MVT::v2f64, Expand);
584     setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
585     setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
586     setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
587     setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
588     setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
589     setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
590     setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
591     setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
592     setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
593     // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
594     setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
595     setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
596     setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
597     setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
598     setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
599     setOperationAction(ISD::FMA, MVT::v2f64, Expand);
600 
601     setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
602     setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
603     setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
604     setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
605     setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
606     setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
607     setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
608     setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
609     setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
610     setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
611     setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
612     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
613     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
614     setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
615 
616     // Mark v2f32 intrinsics.
617     setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
618     setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
619     setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
620     setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
621     setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
622     setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
623     setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
624     setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
625     setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
626     setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
627     setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
628     setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
629     setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
630     setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
631 
632     // Neon does not support some operations on v1i64 and v2i64 types.
633     setOperationAction(ISD::MUL, MVT::v1i64, Expand);
634     // Custom handling for some quad-vector types to detect VMULL.
635     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
636     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
637     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
638     // Custom handling for some vector types to avoid expensive expansions
639     setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
640     setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
641     setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
642     setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
643     // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
644     // a destination type that is wider than the source, and nor does
645     // it have a FP_TO_[SU]INT instruction with a narrower destination than
646     // source.
647     setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
648     setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
649     setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
650     setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
651 
652     setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
653     setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);
654 
655     // NEON does not have single instruction CTPOP for vectors with element
656     // types wider than 8-bits.  However, custom lowering can leverage the
657     // v8i8/v16i8 vcnt instruction.
658     setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
659     setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
660     setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
661     setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
662     setOperationAction(ISD::CTPOP,      MVT::v1i64, Expand);
663     setOperationAction(ISD::CTPOP,      MVT::v2i64, Expand);
664 
665     setOperationAction(ISD::CTLZ,       MVT::v1i64, Expand);
666     setOperationAction(ISD::CTLZ,       MVT::v2i64, Expand);
667 
668     // NEON does not have single instruction CTTZ for vectors.
669     setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
670     setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
671     setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
672     setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
673 
674     setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
675     setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
676     setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
677     setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);
678 
679     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
680     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
681     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
682     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);
683 
684     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
685     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
686     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
687     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
688 
689     // NEON only has FMA instructions as of VFP4.
690     if (!Subtarget->hasVFP4()) {
691       setOperationAction(ISD::FMA, MVT::v2f32, Expand);
692       setOperationAction(ISD::FMA, MVT::v4f32, Expand);
693     }
694 
695     setTargetDAGCombine(ISD::INTRINSIC_VOID);
696     setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
697     setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
698     setTargetDAGCombine(ISD::SHL);
699     setTargetDAGCombine(ISD::SRL);
700     setTargetDAGCombine(ISD::SRA);
701     setTargetDAGCombine(ISD::SIGN_EXTEND);
702     setTargetDAGCombine(ISD::ZERO_EXTEND);
703     setTargetDAGCombine(ISD::ANY_EXTEND);
704     setTargetDAGCombine(ISD::BUILD_VECTOR);
705     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
706     setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
707     setTargetDAGCombine(ISD::STORE);
708     setTargetDAGCombine(ISD::FP_TO_SINT);
709     setTargetDAGCombine(ISD::FP_TO_UINT);
710     setTargetDAGCombine(ISD::FDIV);
711     setTargetDAGCombine(ISD::LOAD);
712 
713     // It is legal to extload from v4i8 to v4i16 or v4i32.
714     for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
715                    MVT::v2i32}) {
716       for (MVT VT : MVT::integer_vector_valuetypes()) {
717         setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
718         setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
719         setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
720       }
721     }
722   }
723 
724   if (Subtarget->isFPOnlySP()) {
725     // When targeting a floating-point unit with only single-precision
726     // operations, f64 is legal for the few double-precision instructions which
727     // are present However, no double-precision operations other than moves,
728     // loads and stores are provided by the hardware.
729     setOperationAction(ISD::FADD,       MVT::f64, Expand);
730     setOperationAction(ISD::FSUB,       MVT::f64, Expand);
731     setOperationAction(ISD::FMUL,       MVT::f64, Expand);
732     setOperationAction(ISD::FMA,        MVT::f64, Expand);
733     setOperationAction(ISD::FDIV,       MVT::f64, Expand);
734     setOperationAction(ISD::FREM,       MVT::f64, Expand);
735     setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
736     setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
737     setOperationAction(ISD::FNEG,       MVT::f64, Expand);
738     setOperationAction(ISD::FABS,       MVT::f64, Expand);
739     setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
740     setOperationAction(ISD::FSIN,       MVT::f64, Expand);
741     setOperationAction(ISD::FCOS,       MVT::f64, Expand);
742     setOperationAction(ISD::FPOW,       MVT::f64, Expand);
743     setOperationAction(ISD::FLOG,       MVT::f64, Expand);
744     setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
745     setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
746     setOperationAction(ISD::FEXP,       MVT::f64, Expand);
747     setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
748     setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
749     setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
750     setOperationAction(ISD::FRINT,      MVT::f64, Expand);
751     setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
752     setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
753     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
754     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
755     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
756     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
757     setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
758     setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
759     setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
760     setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
761   }
762 
763   computeRegisterProperties(Subtarget->getRegisterInfo());
764 
765   // ARM does not have floating-point extending loads.
766   for (MVT VT : MVT::fp_valuetypes()) {
767     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
768     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
769   }
770 
771   // ... or truncating stores
772   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
773   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
774   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
775 
776   // ARM does not have i1 sign extending load.
777   for (MVT VT : MVT::integer_valuetypes())
778     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
779 
780   // ARM supports all 4 flavors of integer indexed load / store.
781   if (!Subtarget->isThumb1Only()) {
782     for (unsigned im = (unsigned)ISD::PRE_INC;
783          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
784       setIndexedLoadAction(im,  MVT::i1,  Legal);
785       setIndexedLoadAction(im,  MVT::i8,  Legal);
786       setIndexedLoadAction(im,  MVT::i16, Legal);
787       setIndexedLoadAction(im,  MVT::i32, Legal);
788       setIndexedStoreAction(im, MVT::i1,  Legal);
789       setIndexedStoreAction(im, MVT::i8,  Legal);
790       setIndexedStoreAction(im, MVT::i16, Legal);
791       setIndexedStoreAction(im, MVT::i32, Legal);
792     }
793   } else {
794     // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
795     setIndexedLoadAction(ISD::POST_INC, MVT::i32,  Legal);
796     setIndexedStoreAction(ISD::POST_INC, MVT::i32,  Legal);
797   }
798 
799   setOperationAction(ISD::SADDO, MVT::i32, Custom);
800   setOperationAction(ISD::UADDO, MVT::i32, Custom);
801   setOperationAction(ISD::SSUBO, MVT::i32, Custom);
802   setOperationAction(ISD::USUBO, MVT::i32, Custom);
803 
804   // i64 operation support.
805   setOperationAction(ISD::MUL,     MVT::i64, Expand);
806   setOperationAction(ISD::MULHU,   MVT::i32, Expand);
807   if (Subtarget->isThumb1Only()) {
808     setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
809     setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
810   }
811   if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
812       || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
813     setOperationAction(ISD::MULHS, MVT::i32, Expand);
814 
815   setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
816   setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
817   setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
818   setOperationAction(ISD::SRL,       MVT::i64, Custom);
819   setOperationAction(ISD::SRA,       MVT::i64, Custom);
820   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
821 
822   setOperationAction(ISD::ADDC,      MVT::i32, Custom);
823   setOperationAction(ISD::ADDE,      MVT::i32, Custom);
824   setOperationAction(ISD::SUBC,      MVT::i32, Custom);
825   setOperationAction(ISD::SUBE,      MVT::i32, Custom);
826 
827   if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
828     setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
829 
830   // ARM does not have ROTL.
831   setOperationAction(ISD::ROTL, MVT::i32, Expand);
832   for (MVT VT : MVT::vector_valuetypes()) {
833     setOperationAction(ISD::ROTL, VT, Expand);
834     setOperationAction(ISD::ROTR, VT, Expand);
835   }
836   setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
837   setOperationAction(ISD::CTPOP, MVT::i32, Expand);
838   if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
839     setOperationAction(ISD::CTLZ, MVT::i32, Expand);
840 
841   // @llvm.readcyclecounter requires the Performance Monitors extension.
842   // Default to the 0 expansion on unsupported platforms.
843   // FIXME: Technically there are older ARM CPUs that have
844   // implementation-specific ways of obtaining this information.
845   if (Subtarget->hasPerfMon())
846     setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
847 
848   // Only ARMv6 has BSWAP.
849   if (!Subtarget->hasV6Ops())
850     setOperationAction(ISD::BSWAP, MVT::i32, Expand);
851 
852   bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
853                                         : Subtarget->hasDivideInARMMode();
854   if (!hasDivide) {
855     // These are expanded into libcalls if the cpu doesn't have HW divider.
856     setOperationAction(ISD::SDIV,  MVT::i32, LibCall);
857     setOperationAction(ISD::UDIV,  MVT::i32, LibCall);
858   }
859 
860   if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
861     setOperationAction(ISD::SDIV, MVT::i32, Custom);
862     setOperationAction(ISD::UDIV, MVT::i32, Custom);
863 
864     setOperationAction(ISD::SDIV, MVT::i64, Custom);
865     setOperationAction(ISD::UDIV, MVT::i64, Custom);
866   }
867 
868   setOperationAction(ISD::SREM,  MVT::i32, Expand);
869   setOperationAction(ISD::UREM,  MVT::i32, Expand);
870 
871   // Register based DivRem for AEABI (RTABI 4.2)
872   if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
873       Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
874       Subtarget->isTargetWindows()) {
875     setOperationAction(ISD::SREM, MVT::i64, Custom);
876     setOperationAction(ISD::UREM, MVT::i64, Custom);
877     HasStandaloneRem = false;
878 
879     if (Subtarget->isTargetWindows()) {
880       const struct {
881         const RTLIB::Libcall Op;
882         const char * const Name;
883         const CallingConv::ID CC;
884       } LibraryCalls[] = {
885         { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
886         { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
887         { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
888         { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },
889 
890         { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
891         { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
892         { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
893         { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
894       };
895 
896       for (const auto &LC : LibraryCalls) {
897         setLibcallName(LC.Op, LC.Name);
898         setLibcallCallingConv(LC.Op, LC.CC);
899       }
900     } else {
901       const struct {
902         const RTLIB::Libcall Op;
903         const char * const Name;
904         const CallingConv::ID CC;
905       } LibraryCalls[] = {
906         { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
907         { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
908         { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
909         { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },
910 
911         { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
912         { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
913         { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
914         { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS },
915       };
916 
917       for (const auto &LC : LibraryCalls) {
918         setLibcallName(LC.Op, LC.Name);
919         setLibcallCallingConv(LC.Op, LC.CC);
920       }
921     }
922 
923     setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
924     setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
925     setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
926     setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
927   } else {
928     setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
929     setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
930   }
931 
932   if (Subtarget->isTargetWindows() && Subtarget->getTargetTriple().isOSMSVCRT())
933     for (auto &VT : {MVT::f32, MVT::f64})
934       setOperationAction(ISD::FPOWI, VT, Custom);
935 
936   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
937   setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
938   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
939   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
940 
941   setOperationAction(ISD::TRAP, MVT::Other, Legal);
942 
943   // Use the default implementation.
944   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
945   setOperationAction(ISD::VAARG,              MVT::Other, Expand);
946   setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
947   setOperationAction(ISD::VAEND,              MVT::Other, Expand);
948   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
949   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
950 
951   if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
952     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
953   else
954     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
955 
956   // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
957   // the default expansion.
958   InsertFencesForAtomic = false;
959   if (Subtarget->hasAnyDataBarrier() &&
960       (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
961     // ATOMIC_FENCE needs custom lowering; the others should have been expanded
962     // to ldrex/strex loops already.
963     setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
964     if (!Subtarget->isThumb() || !Subtarget->isMClass())
965       setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Custom);
966 
967     // On v8, we have particularly efficient implementations of atomic fences
968     // if they can be combined with nearby atomic loads and stores.
969     if (!Subtarget->hasV8Ops() || getTargetMachine().getOptLevel() == 0) {
970       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
971       InsertFencesForAtomic = true;
972     }
973   } else {
974     // If there's anything we can use as a barrier, go through custom lowering
975     // for ATOMIC_FENCE.
976     // If target has DMB in thumb, Fences can be inserted.
977     if (Subtarget->hasDataBarrier())
978       InsertFencesForAtomic = true;
979 
980     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
981                        Subtarget->hasAnyDataBarrier() ? Custom : Expand);
982 
983     // Set them all for expansion, which will force libcalls.
984     setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
985     setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
986     setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
987     setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
988     setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
989     setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
990     setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
991     setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
992     setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
993     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
994     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
995     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
996     // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
997     // Unordered/Monotonic case.
998     if (!InsertFencesForAtomic) {
999       setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1000       setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1001     }
1002   }
1003 
1004   setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);
1005 
1006   // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
1007   if (!Subtarget->hasV6Ops()) {
1008     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1009     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
1010   }
1011   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1012 
1013   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
1014       !Subtarget->isThumb1Only()) {
1015     // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
1016     // iff target supports vfp2.
1017     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1018     setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
1019   }
1020 
1021   // We want to custom lower some of our intrinsics.
1022   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1023   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
1024   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
1025   setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
1026   if (Subtarget->useSjLjEH())
1027     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
1028 
1029   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
1030   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
1031   setOperationAction(ISD::SETCC,     MVT::f64, Expand);
1032   setOperationAction(ISD::SELECT,    MVT::i32, Custom);
1033   setOperationAction(ISD::SELECT,    MVT::f32, Custom);
1034   setOperationAction(ISD::SELECT,    MVT::f64, Custom);
1035   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1036   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1037   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1038 
1039   // Thumb-1 cannot currently select ARMISD::SUBE.
1040   if (!Subtarget->isThumb1Only())
1041     setOperationAction(ISD::SETCCE, MVT::i32, Custom);
1042 
1043   setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
1044   setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
1045   setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
1046   setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
1047   setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
1048 
1049   // We don't support sin/cos/fmod/copysign/pow
1050   setOperationAction(ISD::FSIN,      MVT::f64, Expand);
1051   setOperationAction(ISD::FSIN,      MVT::f32, Expand);
1052   setOperationAction(ISD::FCOS,      MVT::f32, Expand);
1053   setOperationAction(ISD::FCOS,      MVT::f64, Expand);
1054   setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
1055   setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
1056   setOperationAction(ISD::FREM,      MVT::f64, Expand);
1057   setOperationAction(ISD::FREM,      MVT::f32, Expand);
1058   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
1059       !Subtarget->isThumb1Only()) {
1060     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
1061     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
1062   }
1063   setOperationAction(ISD::FPOW,      MVT::f64, Expand);
1064   setOperationAction(ISD::FPOW,      MVT::f32, Expand);
1065 
1066   if (!Subtarget->hasVFP4()) {
1067     setOperationAction(ISD::FMA, MVT::f64, Expand);
1068     setOperationAction(ISD::FMA, MVT::f32, Expand);
1069   }
1070 
1071   // Various VFP goodness
1072   if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
1073     // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
1074     if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
1075       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1076       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1077     }
1078 
1079     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
1080     if (!Subtarget->hasFP16()) {
1081       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1082       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1083     }
1084   }
1085 
1086   // Combine sin / cos into one node or libcall if possible.
1087   if (Subtarget->hasSinCos()) {
1088     setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1089     setLibcallName(RTLIB::SINCOS_F64, "sincos");
1090     if (Subtarget->isTargetWatchABI()) {
1091       setLibcallCallingConv(RTLIB::SINCOS_F32, CallingConv::ARM_AAPCS_VFP);
1092       setLibcallCallingConv(RTLIB::SINCOS_F64, CallingConv::ARM_AAPCS_VFP);
1093     }
1094     if (Subtarget->isTargetIOS() || Subtarget->isTargetWatchOS()) {
1095       // For iOS, we don't want to the normal expansion of a libcall to
1096       // sincos. We want to issue a libcall to __sincos_stret.
1097       setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1098       setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1099     }
1100   }
1101 
1102   // FP-ARMv8 implements a lot of rounding-like FP operations.
1103   if (Subtarget->hasFPARMv8()) {
1104     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1105     setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1106     setOperationAction(ISD::FROUND, MVT::f32, Legal);
1107     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1108     setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1109     setOperationAction(ISD::FRINT, MVT::f32, Legal);
1110     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1111     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1112     setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
1113     setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
1114     setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
1115     setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
1116 
1117     if (!Subtarget->isFPOnlySP()) {
1118       setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1119       setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1120       setOperationAction(ISD::FROUND, MVT::f64, Legal);
1121       setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1122       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1123       setOperationAction(ISD::FRINT, MVT::f64, Legal);
1124       setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1125       setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1126     }
1127   }
1128 
1129   if (Subtarget->hasNEON()) {
1130     // vmin and vmax aren't available in a scalar form, so we use
1131     // a NEON instruction with an undef lane instead.
1132     setOperationAction(ISD::FMINNAN, MVT::f32, Legal);
1133     setOperationAction(ISD::FMAXNAN, MVT::f32, Legal);
1134     setOperationAction(ISD::FMINNAN, MVT::v2f32, Legal);
1135     setOperationAction(ISD::FMAXNAN, MVT::v2f32, Legal);
1136     setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal);
1137     setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal);
1138   }
1139 
1140   // We have target-specific dag combine patterns for the following nodes:
1141   // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
1142   setTargetDAGCombine(ISD::ADD);
1143   setTargetDAGCombine(ISD::SUB);
1144   setTargetDAGCombine(ISD::MUL);
1145   setTargetDAGCombine(ISD::AND);
1146   setTargetDAGCombine(ISD::OR);
1147   setTargetDAGCombine(ISD::XOR);
1148 
1149   if (Subtarget->hasV6Ops())
1150     setTargetDAGCombine(ISD::SRL);
1151 
1152   setStackPointerRegisterToSaveRestore(ARM::SP);
1153 
1154   if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
1155       !Subtarget->hasVFP2())
1156     setSchedulingPreference(Sched::RegPressure);
1157   else
1158     setSchedulingPreference(Sched::Hybrid);
1159 
1160   //// temporary - rewrite interface to use type
1161   MaxStoresPerMemset = 8;
1162   MaxStoresPerMemsetOptSize = 4;
1163   MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
1164   MaxStoresPerMemcpyOptSize = 2;
1165   MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
1166   MaxStoresPerMemmoveOptSize = 2;
1167 
1168   // On ARM arguments smaller than 4 bytes are extended, so all arguments
1169   // are at least 4 bytes aligned.
1170   setMinStackArgumentAlignment(4);
1171 
1172   // Prefer likely predicted branches to selects on out-of-order cores.
1173   PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
1174 
1175   setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
1176 }
1177 
1178 bool ARMTargetLowering::useSoftFloat() const {
1179   return Subtarget->useSoftFloat();
1180 }
1181 
1182 // FIXME: It might make sense to define the representative register class as the
1183 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is
1184 // a super-register of SPR, and DPR is a superset if DPR_VFP2. Consequently,
1185 // SPR's representative would be DPR_VFP2. This should work well if register
1186 // pressure tracking were modified such that a register use would increment the
1187 // pressure of the register class's representative and all of it's super
1188 // classes' representatives transitively. We have not implemented this because
1189 // of the difficulty prior to coalescing of modeling operand register classes
1190 // due to the common occurrence of cross class copies and subregister insertions
1191 // and extractions.
1192 std::pair<const TargetRegisterClass *, uint8_t>
1193 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
1194                                            MVT VT) const {
1195   const TargetRegisterClass *RRC = nullptr;
1196   uint8_t Cost = 1;
1197   switch (VT.SimpleTy) {
1198   default:
1199     return TargetLowering::findRepresentativeClass(TRI, VT);
1200   // Use DPR as representative register class for all floating point
1201   // and vector types. Since there are 32 SPR registers and 32 DPR registers so
1202   // the cost is 1 for both f32 and f64.
1203   case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
1204   case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
1205     RRC = &ARM::DPRRegClass;
1206     // When NEON is used for SP, only half of the register file is available
1207     // because operations that define both SP and DP results will be constrained
1208     // to the VFP2 class (D0-D15). We currently model this constraint prior to
1209     // coalescing by double-counting the SP regs. See the FIXME above.
1210     if (Subtarget->useNEONForSinglePrecisionFP())
1211       Cost = 2;
1212     break;
1213   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1214   case MVT::v4f32: case MVT::v2f64:
1215     RRC = &ARM::DPRRegClass;
1216     Cost = 2;
1217     break;
1218   case MVT::v4i64:
1219     RRC = &ARM::DPRRegClass;
1220     Cost = 4;
1221     break;
1222   case MVT::v8i64:
1223     RRC = &ARM::DPRRegClass;
1224     Cost = 8;
1225     break;
1226   }
1227   return std::make_pair(RRC, Cost);
1228 }
1229 
1230 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
1231   switch ((ARMISD::NodeType)Opcode) {
1232   case ARMISD::FIRST_NUMBER:  break;
1233   case ARMISD::Wrapper:       return "ARMISD::Wrapper";
1234   case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
1235   case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
1236   case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
1237   case ARMISD::CALL:          return "ARMISD::CALL";
1238   case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
1239   case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
1240   case ARMISD::BRCOND:        return "ARMISD::BRCOND";
1241   case ARMISD::BR_JT:         return "ARMISD::BR_JT";
1242   case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
1243   case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
1244   case ARMISD::INTRET_FLAG:   return "ARMISD::INTRET_FLAG";
1245   case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
1246   case ARMISD::CMP:           return "ARMISD::CMP";
1247   case ARMISD::CMN:           return "ARMISD::CMN";
1248   case ARMISD::CMPZ:          return "ARMISD::CMPZ";
1249   case ARMISD::CMPFP:         return "ARMISD::CMPFP";
1250   case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
1251   case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
1252   case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
1253 
1254   case ARMISD::CMOV:          return "ARMISD::CMOV";
1255 
1256   case ARMISD::SSAT:          return "ARMISD::SSAT";
1257 
1258   case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
1259   case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
1260   case ARMISD::RRX:           return "ARMISD::RRX";
1261 
1262   case ARMISD::ADDC:          return "ARMISD::ADDC";
1263   case ARMISD::ADDE:          return "ARMISD::ADDE";
1264   case ARMISD::SUBC:          return "ARMISD::SUBC";
1265   case ARMISD::SUBE:          return "ARMISD::SUBE";
1266 
1267   case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
1268   case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";
1269 
1270   case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
1271   case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
1272   case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";
1273 
1274   case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";
1275 
1276   case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
1277 
1278   case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";
1279 
1280   case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
1281 
1282   case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";
1283 
1284   case ARMISD::WIN__CHKSTK:   return "ARMISD::WIN__CHKSTK";
1285   case ARMISD::WIN__DBZCHK:   return "ARMISD::WIN__DBZCHK";
1286 
1287   case ARMISD::VCEQ:          return "ARMISD::VCEQ";
1288   case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
1289   case ARMISD::VCGE:          return "ARMISD::VCGE";
1290   case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
1291   case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
1292   case ARMISD::VCGEU:         return "ARMISD::VCGEU";
1293   case ARMISD::VCGT:          return "ARMISD::VCGT";
1294   case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
1295   case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
1296   case ARMISD::VCGTU:         return "ARMISD::VCGTU";
1297   case ARMISD::VTST:          return "ARMISD::VTST";
1298 
1299   case ARMISD::VSHL:          return "ARMISD::VSHL";
1300   case ARMISD::VSHRs:         return "ARMISD::VSHRs";
1301   case ARMISD::VSHRu:         return "ARMISD::VSHRu";
1302   case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
1303   case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
1304   case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
1305   case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
1306   case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
1307   case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
1308   case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
1309   case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
1310   case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
1311   case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
1312   case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
1313   case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
1314   case ARMISD::VSLI:          return "ARMISD::VSLI";
1315   case ARMISD::VSRI:          return "ARMISD::VSRI";
1316   case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
1317   case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
1318   case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
1319   case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
1320   case ARMISD::VMOVFPIMM:     return "ARMISD::VMOVFPIMM";
1321   case ARMISD::VDUP:          return "ARMISD::VDUP";
1322   case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
1323   case ARMISD::VEXT:          return "ARMISD::VEXT";
1324   case ARMISD::VREV64:        return "ARMISD::VREV64";
1325   case ARMISD::VREV32:        return "ARMISD::VREV32";
1326   case ARMISD::VREV16:        return "ARMISD::VREV16";
1327   case ARMISD::VZIP:          return "ARMISD::VZIP";
1328   case ARMISD::VUZP:          return "ARMISD::VUZP";
1329   case ARMISD::VTRN:          return "ARMISD::VTRN";
1330   case ARMISD::VTBL1:         return "ARMISD::VTBL1";
1331   case ARMISD::VTBL2:         return "ARMISD::VTBL2";
1332   case ARMISD::VMULLs:        return "ARMISD::VMULLs";
1333   case ARMISD::VMULLu:        return "ARMISD::VMULLu";
1334   case ARMISD::UMAAL:         return "ARMISD::UMAAL";
1335   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
1336   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
1337   case ARMISD::SMLALBB:       return "ARMISD::SMLALBB";
1338   case ARMISD::SMLALBT:       return "ARMISD::SMLALBT";
1339   case ARMISD::SMLALTB:       return "ARMISD::SMLALTB";
1340   case ARMISD::SMLALTT:       return "ARMISD::SMLALTT";
1341   case ARMISD::SMULWB:        return "ARMISD::SMULWB";
1342   case ARMISD::SMULWT:        return "ARMISD::SMULWT";
1343   case ARMISD::SMLALD:        return "ARMISD::SMLALD";
1344   case ARMISD::SMLALDX:       return "ARMISD::SMLALDX";
1345   case ARMISD::SMLSLD:        return "ARMISD::SMLSLD";
1346   case ARMISD::SMLSLDX:       return "ARMISD::SMLSLDX";
1347   case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
1348   case ARMISD::BFI:           return "ARMISD::BFI";
1349   case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
1350   case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
1351   case ARMISD::VBSL:          return "ARMISD::VBSL";
1352   case ARMISD::MEMCPY:        return "ARMISD::MEMCPY";
1353   case ARMISD::VLD1DUP:       return "ARMISD::VLD1DUP";
1354   case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
1355   case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
1356   case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
1357   case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
1358   case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
1359   case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
1360   case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
1361   case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
1362   case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
1363   case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
1364   case ARMISD::VLD1DUP_UPD:   return "ARMISD::VLD1DUP_UPD";
1365   case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
1366   case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
1367   case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
1368   case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
1369   case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
1370   case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
1371   case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
1372   case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
1373   case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
1374   case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
1375   }
1376   return nullptr;
1377 }
1378 
1379 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1380                                           EVT VT) const {
1381   if (!VT.isVector())
1382     return getPointerTy(DL);
1383   return VT.changeVectorElementTypeToInteger();
1384 }
1385 
1386 /// getRegClassFor - Return the register class that should be used for the
1387 /// specified value type.
1388 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
1389   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
1390   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
1391   // load / store 4 to 8 consecutive D registers.
1392   if (Subtarget->hasNEON()) {
1393     if (VT == MVT::v4i64)
1394       return &ARM::QQPRRegClass;
1395     if (VT == MVT::v8i64)
1396       return &ARM::QQQQPRRegClass;
1397   }
1398   return TargetLowering::getRegClassFor(VT);
1399 }
1400 
1401 // memcpy, and other memory intrinsics, typically tries to use LDM/STM if the
1402 // source/dest is aligned and the copy size is large enough. We therefore want
1403 // to align such objects passed to memory intrinsics.
1404 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
1405                                                unsigned &PrefAlign) const {
1406   if (!isa<MemIntrinsic>(CI))
1407     return false;
1408   MinSize = 8;
1409   // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
1410   // cycle faster than 4-byte aligned LDM.
1411   PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
1412   return true;
1413 }
1414 
1415 // Create a fast isel object.
1416 FastISel *
1417 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
1418                                   const TargetLibraryInfo *libInfo) const {
1419   return ARM::createFastISel(funcInfo, libInfo);
1420 }
1421 
1422 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
1423   unsigned NumVals = N->getNumValues();
1424   if (!NumVals)
1425     return Sched::RegPressure;
1426 
1427   for (unsigned i = 0; i != NumVals; ++i) {
1428     EVT VT = N->getValueType(i);
1429     if (VT == MVT::Glue || VT == MVT::Other)
1430       continue;
1431     if (VT.isFloatingPoint() || VT.isVector())
1432       return Sched::ILP;
1433   }
1434 
1435   if (!N->isMachineOpcode())
1436     return Sched::RegPressure;
1437 
1438   // Load are scheduled for latency even if there instruction itinerary
1439   // is not available.
1440   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1441   const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1442 
1443   if (MCID.getNumDefs() == 0)
1444     return Sched::RegPressure;
1445   if (!Itins->isEmpty() &&
1446       Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
1447     return Sched::ILP;
1448 
1449   return Sched::RegPressure;
1450 }
1451 
1452 //===----------------------------------------------------------------------===//
1453 // Lowering Code
1454 //===----------------------------------------------------------------------===//
1455 
1456 static bool isSRL16(const SDValue &Op) {
1457   if (Op.getOpcode() != ISD::SRL)
1458     return false;
1459   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1460     return Const->getZExtValue() == 16;
1461   return false;
1462 }
1463 
1464 static bool isSRA16(const SDValue &Op) {
1465   if (Op.getOpcode() != ISD::SRA)
1466     return false;
1467   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1468     return Const->getZExtValue() == 16;
1469   return false;
1470 }
1471 
1472 static bool isSHL16(const SDValue &Op) {
1473   if (Op.getOpcode() != ISD::SHL)
1474     return false;
1475   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1476     return Const->getZExtValue() == 16;
1477   return false;
1478 }
1479 
1480 // Check for a signed 16-bit value. We special case SRA because it makes it
1481 // more simple when also looking for SRAs that aren't sign extending a
1482 // smaller value. Without the check, we'd need to take extra care with
1483 // checking order for some operations.
1484 static bool isS16(const SDValue &Op, SelectionDAG &DAG) {
1485   if (isSRA16(Op))
1486     return isSHL16(Op.getOperand(0));
1487   return DAG.ComputeNumSignBits(Op) == 17;
1488 }
1489 
1490 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
1491 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
1492   switch (CC) {
1493   default: llvm_unreachable("Unknown condition code!");
1494   case ISD::SETNE:  return ARMCC::NE;
1495   case ISD::SETEQ:  return ARMCC::EQ;
1496   case ISD::SETGT:  return ARMCC::GT;
1497   case ISD::SETGE:  return ARMCC::GE;
1498   case ISD::SETLT:  return ARMCC::LT;
1499   case ISD::SETLE:  return ARMCC::LE;
1500   case ISD::SETUGT: return ARMCC::HI;
1501   case ISD::SETUGE: return ARMCC::HS;
1502   case ISD::SETULT: return ARMCC::LO;
1503   case ISD::SETULE: return ARMCC::LS;
1504   }
1505 }
1506 
1507 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
1508 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
1509                         ARMCC::CondCodes &CondCode2, bool &InvalidOnQNaN) {
1510   CondCode2 = ARMCC::AL;
1511   InvalidOnQNaN = true;
1512   switch (CC) {
1513   default: llvm_unreachable("Unknown FP condition!");
1514   case ISD::SETEQ:
1515   case ISD::SETOEQ:
1516     CondCode = ARMCC::EQ;
1517     InvalidOnQNaN = false;
1518     break;
1519   case ISD::SETGT:
1520   case ISD::SETOGT: CondCode = ARMCC::GT; break;
1521   case ISD::SETGE:
1522   case ISD::SETOGE: CondCode = ARMCC::GE; break;
1523   case ISD::SETOLT: CondCode = ARMCC::MI; break;
1524   case ISD::SETOLE: CondCode = ARMCC::LS; break;
1525   case ISD::SETONE:
1526     CondCode = ARMCC::MI;
1527     CondCode2 = ARMCC::GT;
1528     InvalidOnQNaN = false;
1529     break;
1530   case ISD::SETO:   CondCode = ARMCC::VC; break;
1531   case ISD::SETUO:  CondCode = ARMCC::VS; break;
1532   case ISD::SETUEQ:
1533     CondCode = ARMCC::EQ;
1534     CondCode2 = ARMCC::VS;
1535     InvalidOnQNaN = false;
1536     break;
1537   case ISD::SETUGT: CondCode = ARMCC::HI; break;
1538   case ISD::SETUGE: CondCode = ARMCC::PL; break;
1539   case ISD::SETLT:
1540   case ISD::SETULT: CondCode = ARMCC::LT; break;
1541   case ISD::SETLE:
1542   case ISD::SETULE: CondCode = ARMCC::LE; break;
1543   case ISD::SETNE:
1544   case ISD::SETUNE:
1545     CondCode = ARMCC::NE;
1546     InvalidOnQNaN = false;
1547     break;
1548   }
1549 }
1550 
1551 //===----------------------------------------------------------------------===//
1552 //                      Calling Convention Implementation
1553 //===----------------------------------------------------------------------===//
1554 
1555 #include "ARMGenCallingConv.inc"
1556 
1557 /// getEffectiveCallingConv - Get the effective calling convention, taking into
1558 /// account presence of floating point hardware and calling convention
1559 /// limitations, such as support for variadic functions.
1560 CallingConv::ID
1561 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
1562                                            bool isVarArg) const {
1563   switch (CC) {
1564   default:
1565     llvm_unreachable("Unsupported calling convention");
1566   case CallingConv::ARM_AAPCS:
1567   case CallingConv::ARM_APCS:
1568   case CallingConv::GHC:
1569     return CC;
1570   case CallingConv::PreserveMost:
1571     return CallingConv::PreserveMost;
1572   case CallingConv::ARM_AAPCS_VFP:
1573   case CallingConv::Swift:
1574     return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
1575   case CallingConv::C:
1576     if (!Subtarget->isAAPCS_ABI())
1577       return CallingConv::ARM_APCS;
1578     else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
1579              getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
1580              !isVarArg)
1581       return CallingConv::ARM_AAPCS_VFP;
1582     else
1583       return CallingConv::ARM_AAPCS;
1584   case CallingConv::Fast:
1585   case CallingConv::CXX_FAST_TLS:
1586     if (!Subtarget->isAAPCS_ABI()) {
1587       if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
1588         return CallingConv::Fast;
1589       return CallingConv::ARM_APCS;
1590     } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
1591       return CallingConv::ARM_AAPCS_VFP;
1592     else
1593       return CallingConv::ARM_AAPCS;
1594   }
1595 }
1596 
1597 CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1598                                                  bool isVarArg) const {
1599   return CCAssignFnForNode(CC, false, isVarArg);
1600 }
1601 
1602 CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1603                                                    bool isVarArg) const {
1604   return CCAssignFnForNode(CC, true, isVarArg);
1605 }
1606 
1607 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
1608 /// CallingConvention.
1609 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
1610                                                  bool Return,
1611                                                  bool isVarArg) const {
1612   switch (getEffectiveCallingConv(CC, isVarArg)) {
1613   default:
1614     llvm_unreachable("Unsupported calling convention");
1615   case CallingConv::ARM_APCS:
1616     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1617   case CallingConv::ARM_AAPCS:
1618     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1619   case CallingConv::ARM_AAPCS_VFP:
1620     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1621   case CallingConv::Fast:
1622     return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1623   case CallingConv::GHC:
1624     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
1625   case CallingConv::PreserveMost:
1626     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1627   }
1628 }
1629 
1630 /// LowerCallResult - Lower the result values of a call into the
1631 /// appropriate copies out of appropriate physical registers.
1632 SDValue ARMTargetLowering::LowerCallResult(
1633     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
1634     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1635     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
1636     SDValue ThisVal) const {
1637 
1638   // Assign locations to each value returned by this call.
1639   SmallVector<CCValAssign, 16> RVLocs;
1640   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1641                  *DAG.getContext());
1642   CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));
1643 
1644   // Copy all of the result registers out of their specified physreg.
1645   for (unsigned i = 0; i != RVLocs.size(); ++i) {
1646     CCValAssign VA = RVLocs[i];
1647 
1648     // Pass 'this' value directly from the argument to return value, to avoid
1649     // reg unit interference
1650     if (i == 0 && isThisReturn) {
1651       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
1652              "unexpected return calling convention register assignment");
1653       InVals.push_back(ThisVal);
1654       continue;
1655     }
1656 
1657     SDValue Val;
1658     if (VA.needsCustom()) {
1659       // Handle f64 or half of a v2f64.
1660       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
1661                                       InFlag);
1662       Chain = Lo.getValue(1);
1663       InFlag = Lo.getValue(2);
1664       VA = RVLocs[++i]; // skip ahead to next loc
1665       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
1666                                       InFlag);
1667       Chain = Hi.getValue(1);
1668       InFlag = Hi.getValue(2);
1669       if (!Subtarget->isLittle())
1670         std::swap (Lo, Hi);
1671       Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
1672 
1673       if (VA.getLocVT() == MVT::v2f64) {
1674         SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
1675         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
1676                           DAG.getConstant(0, dl, MVT::i32));
1677 
1678         VA = RVLocs[++i]; // skip ahead to next loc
1679         Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
1680         Chain = Lo.getValue(1);
1681         InFlag = Lo.getValue(2);
1682         VA = RVLocs[++i]; // skip ahead to next loc
1683         Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
1684         Chain = Hi.getValue(1);
1685         InFlag = Hi.getValue(2);
1686         if (!Subtarget->isLittle())
1687           std::swap (Lo, Hi);
1688         Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
1689         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
1690                           DAG.getConstant(1, dl, MVT::i32));
1691       }
1692     } else {
1693       Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1694                                InFlag);
1695       Chain = Val.getValue(1);
1696       InFlag = Val.getValue(2);
1697     }
1698 
1699     switch (VA.getLocInfo()) {
1700     default: llvm_unreachable("Unknown loc info!");
1701     case CCValAssign::Full: break;
1702     case CCValAssign::BCvt:
1703       Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
1704       break;
1705     }
1706 
1707     InVals.push_back(Val);
1708   }
1709 
1710   return Chain;
1711 }
1712 
1713 /// LowerMemOpCallTo - Store the argument to the stack.
1714 SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
1715                                             SDValue Arg, const SDLoc &dl,
1716                                             SelectionDAG &DAG,
1717                                             const CCValAssign &VA,
1718                                             ISD::ArgFlagsTy Flags) const {
1719   unsigned LocMemOffset = VA.getLocMemOffset();
1720   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
1721   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
1722                        StackPtr, PtrOff);
1723   return DAG.getStore(
1724       Chain, dl, Arg, PtrOff,
1725       MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
1726 }
1727 
1728 void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
1729                                          SDValue Chain, SDValue &Arg,
1730                                          RegsToPassVector &RegsToPass,
1731                                          CCValAssign &VA, CCValAssign &NextVA,
1732                                          SDValue &StackPtr,
1733                                          SmallVectorImpl<SDValue> &MemOpChains,
1734                                          ISD::ArgFlagsTy Flags) const {
1735 
1736   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
1737                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
1738   unsigned id = Subtarget->isLittle() ? 0 : 1;
1739   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
1740 
1741   if (NextVA.isRegLoc())
1742     RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
1743   else {
1744     assert(NextVA.isMemLoc());
1745     if (!StackPtr.getNode())
1746       StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
1747                                     getPointerTy(DAG.getDataLayout()));
1748 
1749     MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
1750                                            dl, DAG, NextVA,
1751                                            Flags));
1752   }
1753 }
1754 
1755 /// LowerCall - Lowering a call into a callseq_start <-
1756 /// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
1757 /// nodes.
1758 SDValue
1759 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1760                              SmallVectorImpl<SDValue> &InVals) const {
1761   SelectionDAG &DAG                     = CLI.DAG;
1762   SDLoc &dl                             = CLI.DL;
1763   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1764   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
1765   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
1766   SDValue Chain                         = CLI.Chain;
1767   SDValue Callee                        = CLI.Callee;
1768   bool &isTailCall                      = CLI.IsTailCall;
1769   CallingConv::ID CallConv              = CLI.CallConv;
1770   bool doesNotRet                       = CLI.DoesNotReturn;
1771   bool isVarArg                         = CLI.IsVarArg;
1772 
1773   MachineFunction &MF = DAG.getMachineFunction();
1774   bool isStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
1775   bool isThisReturn   = false;
1776   bool isSibCall      = false;
1777   auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
1778 
1779   // Disable tail calls if they're not supported.
1780   if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
1781     isTailCall = false;
1782 
1783   if (isTailCall) {
1784     // Check if it's really possible to do a tail call.
1785     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1786                     isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
1787                                                    Outs, OutVals, Ins, DAG);
1788     if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
1789       report_fatal_error("failed to perform tail call elimination on a call "
1790                          "site marked musttail");
1791     // We don't support GuaranteedTailCallOpt for ARM, only automatically
1792     // detected sibcalls.
1793     if (isTailCall) {
1794       ++NumTailCalls;
1795       isSibCall = true;
1796     }
1797   }
1798 
1799   // Analyze operands of the call, assigning locations to each operand.
1800   SmallVector<CCValAssign, 16> ArgLocs;
1801   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1802                  *DAG.getContext());
1803   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
1804 
1805   // Get a count of how many bytes are to be pushed on the stack.
1806   unsigned NumBytes = CCInfo.getNextStackOffset();
1807 
1808   // For tail calls, memory operands are available in our caller's stack.
1809   if (isSibCall)
1810     NumBytes = 0;
1811 
1812   // Adjust the stack pointer for the new arguments...
1813   // These operations are automatically eliminated by the prolog/epilog pass
1814   if (!isSibCall)
1815     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1816 
1817   SDValue StackPtr =
1818       DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));
1819 
1820   RegsToPassVector RegsToPass;
1821   SmallVector<SDValue, 8> MemOpChains;
1822 
1823   // Walk the register/memloc assignments, inserting copies/loads.  In the case
1824   // of tail call optimization, arguments are handled later.
1825   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1826        i != e;
1827        ++i, ++realArgIdx) {
1828     CCValAssign &VA = ArgLocs[i];
1829     SDValue Arg = OutVals[realArgIdx];
1830     ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
1831     bool isByVal = Flags.isByVal();
1832 
1833     // Promote the value if needed.
1834     switch (VA.getLocInfo()) {
1835     default: llvm_unreachable("Unknown loc info!");
1836     case CCValAssign::Full: break;
1837     case CCValAssign::SExt:
1838       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1839       break;
1840     case CCValAssign::ZExt:
1841       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1842       break;
1843     case CCValAssign::AExt:
1844       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1845       break;
1846     case CCValAssign::BCvt:
1847       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1848       break;
1849     }
1850 
1851     // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
1852     if (VA.needsCustom()) {
1853       if (VA.getLocVT() == MVT::v2f64) {
1854         SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1855                                   DAG.getConstant(0, dl, MVT::i32));
1856         SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1857                                   DAG.getConstant(1, dl, MVT::i32));
1858 
1859         PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
1860                          VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1861 
1862         VA = ArgLocs[++i]; // skip ahead to next loc
1863         if (VA.isRegLoc()) {
1864           PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
1865                            VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1866         } else {
1867           assert(VA.isMemLoc());
1868 
1869           MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
1870                                                  dl, DAG, VA, Flags));
1871         }
1872       } else {
1873         PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
1874                          StackPtr, MemOpChains, Flags);
1875       }
1876     } else if (VA.isRegLoc()) {
1877       if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
1878           Outs[0].VT == MVT::i32) {
1879         assert(VA.getLocVT() == MVT::i32 &&
1880                "unexpected calling convention register assignment");
1881         assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
1882                "unexpected use of 'returned'");
1883         isThisReturn = true;
1884       }
1885       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1886     } else if (isByVal) {
1887       assert(VA.isMemLoc());
1888       unsigned offset = 0;
1889 
1890       // True if this byval aggregate will be split between registers
1891       // and memory.
1892       unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
1893       unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
1894 
1895       if (CurByValIdx < ByValArgsCount) {
1896 
1897         unsigned RegBegin, RegEnd;
1898         CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
1899 
1900         EVT PtrVT =
1901             DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
1902         unsigned int i, j;
1903         for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
1904           SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
1905           SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
1906           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
1907                                      MachinePointerInfo(),
1908                                      DAG.InferPtrAlignment(AddArg));
1909           MemOpChains.push_back(Load.getValue(1));
1910           RegsToPass.push_back(std::make_pair(j, Load));
1911         }
1912 
1913         // If parameter size outsides register area, "offset" value
1914         // helps us to calculate stack slot for remained part properly.
1915         offset = RegEnd - RegBegin;
1916 
1917         CCInfo.nextInRegsParam();
1918       }
1919 
1920       if (Flags.getByValSize() > 4*offset) {
1921         auto PtrVT = getPointerTy(DAG.getDataLayout());
1922         unsigned LocMemOffset = VA.getLocMemOffset();
1923         SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
1924         SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
1925         SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
1926         SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
1927         SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
1928                                            MVT::i32);
1929         SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
1930                                             MVT::i32);
1931 
1932         SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
1933         SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
1934         MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
1935                                           Ops));
1936       }
1937     } else if (!isSibCall) {
1938       assert(VA.isMemLoc());
1939 
1940       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
1941                                              dl, DAG, VA, Flags));
1942     }
1943   }
1944 
1945   if (!MemOpChains.empty())
1946     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1947 
1948   // Build a sequence of copy-to-reg nodes chained together with token chain
1949   // and flag operands which copy the outgoing args into the appropriate regs.
1950   SDValue InFlag;
1951   // Tail call byval lowering might overwrite argument registers so in case of
1952   // tail call optimization the copies to registers are lowered later.
1953   if (!isTailCall)
1954     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1955       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1956                                RegsToPass[i].second, InFlag);
1957       InFlag = Chain.getValue(1);
1958     }
1959 
1960   // For tail calls lower the arguments to the 'real' stack slot.
1961   if (isTailCall) {
1962     // Force all the incoming stack arguments to be loaded from the stack
1963     // before any new outgoing arguments are stored to the stack, because the
1964     // outgoing stack slots may alias the incoming argument stack slots, and
1965     // the alias isn't otherwise explicit. This is slightly more conservative
1966     // than necessary, because it means that each store effectively depends
1967     // on every argument instead of just those arguments it would clobber.
1968 
1969     // Do not flag preceding copytoreg stuff together with the following stuff.
1970     InFlag = SDValue();
1971     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1972       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1973                                RegsToPass[i].second, InFlag);
1974       InFlag = Chain.getValue(1);
1975     }
1976     InFlag = SDValue();
1977   }
1978 
1979   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1980   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1981   // node so that legalize doesn't hack it.
1982   bool isDirect = false;
1983 
1984   const TargetMachine &TM = getTargetMachine();
1985   const Module *Mod = MF.getFunction()->getParent();
1986   const GlobalValue *GV = nullptr;
1987   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1988     GV = G->getGlobal();
1989   bool isStub =
1990       !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();
1991 
1992   bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
1993   bool isLocalARMFunc = false;
1994   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1995   auto PtrVt = getPointerTy(DAG.getDataLayout());
1996 
1997   if (Subtarget->genLongCalls()) {
1998     assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
1999            "long-calls codegen is not position independent!");
2000     // Handle a global address or an external symbol. If it's not one of
2001     // those, the target's already in a register, so we don't need to do
2002     // anything extra.
2003     if (isa<GlobalAddressSDNode>(Callee)) {
2004       // Create a constant pool entry for the callee address
2005       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2006       ARMConstantPoolValue *CPV =
2007         ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
2008 
2009       // Get the address of the callee into a register
2010       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2011       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2012       Callee = DAG.getLoad(
2013           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2014           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2015     } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
2016       const char *Sym = S->getSymbol();
2017 
2018       // Create a constant pool entry for the callee address
2019       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2020       ARMConstantPoolValue *CPV =
2021         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2022                                       ARMPCLabelIndex, 0);
2023       // Get the address of the callee into a register
2024       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2025       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2026       Callee = DAG.getLoad(
2027           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2028           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2029     }
2030   } else if (isa<GlobalAddressSDNode>(Callee)) {
2031     // If we're optimizing for minimum size and the function is called three or
2032     // more times in this block, we can improve codesize by calling indirectly
2033     // as BLXr has a 16-bit encoding.
2034     auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
2035     auto *BB = CLI.CS->getParent();
2036     bool PreferIndirect =
2037         Subtarget->isThumb() && MF.getFunction()->optForMinSize() &&
2038         count_if(GV->users(), [&BB](const User *U) {
2039           return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB;
2040         }) > 2;
2041 
2042     if (!PreferIndirect) {
2043       isDirect = true;
2044       bool isDef = GV->isStrongDefinitionForLinker();
2045 
2046       // ARM call to a local ARM function is predicable.
2047       isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
2048       // tBX takes a register source operand.
2049       if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2050         assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
2051         Callee = DAG.getNode(
2052             ARMISD::WrapperPIC, dl, PtrVt,
2053             DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
2054         Callee = DAG.getLoad(
2055             PtrVt, dl, DAG.getEntryNode(), Callee,
2056             MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2057             /* Alignment = */ 0, MachineMemOperand::MODereferenceable |
2058                                      MachineMemOperand::MOInvariant);
2059       } else if (Subtarget->isTargetCOFF()) {
2060         assert(Subtarget->isTargetWindows() &&
2061                "Windows is the only supported COFF target");
2062         unsigned TargetFlags = GV->hasDLLImportStorageClass()
2063                                    ? ARMII::MO_DLLIMPORT
2064                                    : ARMII::MO_NO_FLAG;
2065         Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0,
2066                                             TargetFlags);
2067         if (GV->hasDLLImportStorageClass())
2068           Callee =
2069               DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
2070                           DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
2071                           MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2072       } else {
2073         Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
2074       }
2075     }
2076   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2077     isDirect = true;
2078     // tBX takes a register source operand.
2079     const char *Sym = S->getSymbol();
2080     if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2081       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2082       ARMConstantPoolValue *CPV =
2083         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2084                                       ARMPCLabelIndex, 4);
2085       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2086       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2087       Callee = DAG.getLoad(
2088           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2089           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2090       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2091       Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
2092     } else {
2093       Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
2094     }
2095   }
2096 
2097   // FIXME: handle tail calls differently.
2098   unsigned CallOpc;
2099   if (Subtarget->isThumb()) {
2100     if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
2101       CallOpc = ARMISD::CALL_NOLINK;
2102     else
2103       CallOpc = ARMISD::CALL;
2104   } else {
2105     if (!isDirect && !Subtarget->hasV5TOps())
2106       CallOpc = ARMISD::CALL_NOLINK;
2107     else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
2108              // Emit regular call when code size is the priority
2109              !MF.getFunction()->optForMinSize())
2110       // "mov lr, pc; b _foo" to avoid confusing the RSP
2111       CallOpc = ARMISD::CALL_NOLINK;
2112     else
2113       CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
2114   }
2115 
2116   std::vector<SDValue> Ops;
2117   Ops.push_back(Chain);
2118   Ops.push_back(Callee);
2119 
2120   // Add argument registers to the end of the list so that they are known live
2121   // into the call.
2122   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2123     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2124                                   RegsToPass[i].second.getValueType()));
2125 
2126   // Add a register mask operand representing the call-preserved registers.
2127   if (!isTailCall) {
2128     const uint32_t *Mask;
2129     const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
2130     if (isThisReturn) {
2131       // For 'this' returns, use the R0-preserving mask if applicable
2132       Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
2133       if (!Mask) {
2134         // Set isThisReturn to false if the calling convention is not one that
2135         // allows 'returned' to be modeled in this way, so LowerCallResult does
2136         // not try to pass 'this' straight through
2137         isThisReturn = false;
2138         Mask = ARI->getCallPreservedMask(MF, CallConv);
2139       }
2140     } else
2141       Mask = ARI->getCallPreservedMask(MF, CallConv);
2142 
2143     assert(Mask && "Missing call preserved mask for calling convention");
2144     Ops.push_back(DAG.getRegisterMask(Mask));
2145   }
2146 
2147   if (InFlag.getNode())
2148     Ops.push_back(InFlag);
2149 
2150   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2151   if (isTailCall) {
2152     MF.getFrameInfo().setHasTailCall();
2153     return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
2154   }
2155 
2156   // Returns a chain and a flag for retval copy to use.
2157   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
2158   InFlag = Chain.getValue(1);
2159 
2160   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
2161                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
2162   if (!Ins.empty())
2163     InFlag = Chain.getValue(1);
2164 
2165   // Handle result values, copying them out of physregs into vregs that we
2166   // return.
2167   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2168                          InVals, isThisReturn,
2169                          isThisReturn ? OutVals[0] : SDValue());
2170 }
2171 
2172 /// HandleByVal - Every parameter *after* a byval parameter is passed
2173 /// on the stack.  Remember the next parameter register to allocate,
2174 /// and then confiscate the rest of the parameter registers to insure
2175 /// this.
2176 void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
2177                                     unsigned Align) const {
2178   // Byval (as with any stack) slots are always at least 4 byte aligned.
2179   Align = std::max(Align, 4U);
2180 
2181   unsigned Reg = State->AllocateReg(GPRArgRegs);
2182   if (!Reg)
2183     return;
2184 
2185   unsigned AlignInRegs = Align / 4;
2186   unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
2187   for (unsigned i = 0; i < Waste; ++i)
2188     Reg = State->AllocateReg(GPRArgRegs);
2189 
2190   if (!Reg)
2191     return;
2192 
2193   unsigned Excess = 4 * (ARM::R4 - Reg);
2194 
2195   // Special case when NSAA != SP and parameter size greater than size of
2196   // all remained GPR regs. In that case we can't split parameter, we must
2197   // send it to stack. We also must set NCRN to R4, so waste all
2198   // remained registers.
2199   const unsigned NSAAOffset = State->getNextStackOffset();
2200   if (NSAAOffset != 0 && Size > Excess) {
2201     while (State->AllocateReg(GPRArgRegs))
2202       ;
2203     return;
2204   }
2205 
2206   // First register for byval parameter is the first register that wasn't
2207   // allocated before this method call, so it would be "reg".
2208   // If parameter is small enough to be saved in range [reg, r4), then
2209   // the end (first after last) register would be reg + param-size-in-regs,
2210   // else parameter would be splitted between registers and stack,
2211   // end register would be r4 in this case.
2212   unsigned ByValRegBegin = Reg;
2213   unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
2214   State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
2215   // Note, first register is allocated in the beginning of function already,
2216   // allocate remained amount of registers we need.
2217   for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2218     State->AllocateReg(GPRArgRegs);
2219   // A byval parameter that is split between registers and memory needs its
2220   // size truncated here.
2221   // In the case where the entire structure fits in registers, we set the
2222   // size in memory to zero.
2223   Size = std::max<int>(Size - Excess, 0);
2224 }
2225 
2226 /// MatchingStackOffset - Return true if the given stack call argument is
2227 /// already available in the same position (relatively) of the caller's
2228 /// incoming argument stack.
2229 static
2230 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2231                          MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
2232                          const TargetInstrInfo *TII) {
2233   unsigned Bytes = Arg.getValueSizeInBits() / 8;
2234   int FI = std::numeric_limits<int>::max();
2235   if (Arg.getOpcode() == ISD::CopyFromReg) {
2236     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2237     if (!TargetRegisterInfo::isVirtualRegister(VR))
2238       return false;
2239     MachineInstr *Def = MRI->getVRegDef(VR);
2240     if (!Def)
2241       return false;
2242     if (!Flags.isByVal()) {
2243       if (!TII->isLoadFromStackSlot(*Def, FI))
2244         return false;
2245     } else {
2246       return false;
2247     }
2248   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2249     if (Flags.isByVal())
2250       // ByVal argument is passed in as a pointer but it's now being
2251       // dereferenced. e.g.
2252       // define @foo(%struct.X* %A) {
2253       //   tail call @bar(%struct.X* byval %A)
2254       // }
2255       return false;
2256     SDValue Ptr = Ld->getBasePtr();
2257     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
2258     if (!FINode)
2259       return false;
2260     FI = FINode->getIndex();
2261   } else
2262     return false;
2263 
2264   assert(FI != std::numeric_limits<int>::max());
2265   if (!MFI.isFixedObjectIndex(FI))
2266     return false;
2267   return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
2268 }
2269 
2270 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2271 /// for tail call optimization. Targets which want to do tail call
2272 /// optimization should implement this function.
2273 bool
2274 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
2275                                                      CallingConv::ID CalleeCC,
2276                                                      bool isVarArg,
2277                                                      bool isCalleeStructRet,
2278                                                      bool isCallerStructRet,
2279                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
2280                                     const SmallVectorImpl<SDValue> &OutVals,
2281                                     const SmallVectorImpl<ISD::InputArg> &Ins,
2282                                                      SelectionDAG& DAG) const {
2283   MachineFunction &MF = DAG.getMachineFunction();
2284   const Function *CallerF = MF.getFunction();
2285   CallingConv::ID CallerCC = CallerF->getCallingConv();
2286 
2287   assert(Subtarget->supportsTailCall());
2288 
2289   // Look for obvious safe cases to perform tail call optimization that do not
2290   // require ABI changes. This is what gcc calls sibcall.
2291 
2292   // Exception-handling functions need a special set of instructions to indicate
2293   // a return to the hardware. Tail-calling another function would probably
2294   // break this.
2295   if (CallerF->hasFnAttribute("interrupt"))
2296     return false;
2297 
2298   // Also avoid sibcall optimization if either caller or callee uses struct
2299   // return semantics.
2300   if (isCalleeStructRet || isCallerStructRet)
2301     return false;
2302 
2303   // Externally-defined functions with weak linkage should not be
2304   // tail-called on ARM when the OS does not support dynamic
2305   // pre-emption of symbols, as the AAELF spec requires normal calls
2306   // to undefined weak functions to be replaced with a NOP or jump to the
2307   // next instruction. The behaviour of branch instructions in this
2308   // situation (as used for tail calls) is implementation-defined, so we
2309   // cannot rely on the linker replacing the tail call with a return.
2310   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2311     const GlobalValue *GV = G->getGlobal();
2312     const Triple &TT = getTargetMachine().getTargetTriple();
2313     if (GV->hasExternalWeakLinkage() &&
2314         (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
2315       return false;
2316   }
2317 
2318   // Check that the call results are passed in the same way.
2319   LLVMContext &C = *DAG.getContext();
2320   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
2321                                   CCAssignFnForReturn(CalleeCC, isVarArg),
2322                                   CCAssignFnForReturn(CallerCC, isVarArg)))
2323     return false;
2324   // The callee has to preserve all registers the caller needs to preserve.
2325   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2326   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2327   if (CalleeCC != CallerCC) {
2328     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2329     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2330       return false;
2331   }
2332 
2333   // If Caller's vararg or byval argument has been split between registers and
2334   // stack, do not perform tail call, since part of the argument is in caller's
2335   // local frame.
2336   const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
2337   if (AFI_Caller->getArgRegsSaveSize())
2338     return false;
2339 
2340   // If the callee takes no arguments then go on to check the results of the
2341   // call.
2342   if (!Outs.empty()) {
2343     // Check if stack adjustment is needed. For now, do not do this if any
2344     // argument is passed on the stack.
2345     SmallVector<CCValAssign, 16> ArgLocs;
2346     CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
2347     CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
2348     if (CCInfo.getNextStackOffset()) {
2349       // Check if the arguments are already laid out in the right way as
2350       // the caller's fixed stack objects.
2351       MachineFrameInfo &MFI = MF.getFrameInfo();
2352       const MachineRegisterInfo *MRI = &MF.getRegInfo();
2353       const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2354       for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2355            i != e;
2356            ++i, ++realArgIdx) {
2357         CCValAssign &VA = ArgLocs[i];
2358         EVT RegVT = VA.getLocVT();
2359         SDValue Arg = OutVals[realArgIdx];
2360         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2361         if (VA.getLocInfo() == CCValAssign::Indirect)
2362           return false;
2363         if (VA.needsCustom()) {
2364           // f64 and vector types are split into multiple registers or
2365           // register/stack-slot combinations.  The types will not match
2366           // the registers; give up on memory f64 refs until we figure
2367           // out what to do about this.
2368           if (!VA.isRegLoc())
2369             return false;
2370           if (!ArgLocs[++i].isRegLoc())
2371             return false;
2372           if (RegVT == MVT::v2f64) {
2373             if (!ArgLocs[++i].isRegLoc())
2374               return false;
2375             if (!ArgLocs[++i].isRegLoc())
2376               return false;
2377           }
2378         } else if (!VA.isRegLoc()) {
2379           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
2380                                    MFI, MRI, TII))
2381             return false;
2382         }
2383       }
2384     }
2385 
2386     const MachineRegisterInfo &MRI = MF.getRegInfo();
2387     if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
2388       return false;
2389   }
2390 
2391   return true;
2392 }
2393 
2394 bool
2395 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2396                                   MachineFunction &MF, bool isVarArg,
2397                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
2398                                   LLVMContext &Context) const {
2399   SmallVector<CCValAssign, 16> RVLocs;
2400   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2401   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2402 }
2403 
2404 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
2405                                     const SDLoc &DL, SelectionDAG &DAG) {
2406   const MachineFunction &MF = DAG.getMachineFunction();
2407   const Function *F = MF.getFunction();
2408 
2409   StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
2410 
2411   // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
2412   // version of the "preferred return address". These offsets affect the return
2413   // instruction if this is a return from PL1 without hypervisor extensions.
2414   //    IRQ/FIQ: +4     "subs pc, lr, #4"
2415   //    SWI:     0      "subs pc, lr, #0"
2416   //    ABORT:   +4     "subs pc, lr, #4"
2417   //    UNDEF:   +4/+2  "subs pc, lr, #0"
2418   // UNDEF varies depending on where the exception came from ARM or Thumb
2419   // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
2420 
2421   int64_t LROffset;
2422   if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
2423       IntKind == "ABORT")
2424     LROffset = 4;
2425   else if (IntKind == "SWI" || IntKind == "UNDEF")
2426     LROffset = 0;
2427   else
2428     report_fatal_error("Unsupported interrupt attribute. If present, value "
2429                        "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2430 
2431   RetOps.insert(RetOps.begin() + 1,
2432                 DAG.getConstant(LROffset, DL, MVT::i32, false));
2433 
2434   return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
2435 }
2436 
2437 SDValue
2438 ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2439                                bool isVarArg,
2440                                const SmallVectorImpl<ISD::OutputArg> &Outs,
2441                                const SmallVectorImpl<SDValue> &OutVals,
2442                                const SDLoc &dl, SelectionDAG &DAG) const {
2443 
2444   // CCValAssign - represent the assignment of the return value to a location.
2445   SmallVector<CCValAssign, 16> RVLocs;
2446 
2447   // CCState - Info about the registers and stack slots.
2448   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2449                  *DAG.getContext());
2450 
2451   // Analyze outgoing return values.
2452   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2453 
2454   SDValue Flag;
2455   SmallVector<SDValue, 4> RetOps;
2456   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2457   bool isLittleEndian = Subtarget->isLittle();
2458 
2459   MachineFunction &MF = DAG.getMachineFunction();
2460   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2461   AFI->setReturnRegsCount(RVLocs.size());
2462 
2463   // Copy the result values into the output registers.
2464   for (unsigned i = 0, realRVLocIdx = 0;
2465        i != RVLocs.size();
2466        ++i, ++realRVLocIdx) {
2467     CCValAssign &VA = RVLocs[i];
2468     assert(VA.isRegLoc() && "Can only return in registers!");
2469 
2470     SDValue Arg = OutVals[realRVLocIdx];
2471 
2472     switch (VA.getLocInfo()) {
2473     default: llvm_unreachable("Unknown loc info!");
2474     case CCValAssign::Full: break;
2475     case CCValAssign::BCvt:
2476       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2477       break;
2478     }
2479 
2480     if (VA.needsCustom()) {
2481       if (VA.getLocVT() == MVT::v2f64) {
2482         // Extract the first half and return it in two registers.
2483         SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2484                                    DAG.getConstant(0, dl, MVT::i32));
2485         SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
2486                                        DAG.getVTList(MVT::i32, MVT::i32), Half);
2487 
2488         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2489                                  HalfGPRs.getValue(isLittleEndian ? 0 : 1),
2490                                  Flag);
2491         Flag = Chain.getValue(1);
2492         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2493         VA = RVLocs[++i]; // skip ahead to next loc
2494         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2495                                  HalfGPRs.getValue(isLittleEndian ? 1 : 0),
2496                                  Flag);
2497         Flag = Chain.getValue(1);
2498         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2499         VA = RVLocs[++i]; // skip ahead to next loc
2500 
2501         // Extract the 2nd half and fall through to handle it as an f64 value.
2502         Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2503                           DAG.getConstant(1, dl, MVT::i32));
2504       }
2505       // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
2506       // available.
2507       SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
2508                                   DAG.getVTList(MVT::i32, MVT::i32), Arg);
2509       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2510                                fmrrd.getValue(isLittleEndian ? 0 : 1),
2511                                Flag);
2512       Flag = Chain.getValue(1);
2513       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2514       VA = RVLocs[++i]; // skip ahead to next loc
2515       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2516                                fmrrd.getValue(isLittleEndian ? 1 : 0),
2517                                Flag);
2518     } else
2519       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
2520 
2521     // Guarantee that all emitted copies are
2522     // stuck together, avoiding something bad.
2523     Flag = Chain.getValue(1);
2524     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2525   }
2526   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2527   const MCPhysReg *I =
2528       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2529   if (I) {
2530     for (; *I; ++I) {
2531       if (ARM::GPRRegClass.contains(*I))
2532         RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2533       else if (ARM::DPRRegClass.contains(*I))
2534         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
2535       else
2536         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2537     }
2538   }
2539 
2540   // Update chain and glue.
2541   RetOps[0] = Chain;
2542   if (Flag.getNode())
2543     RetOps.push_back(Flag);
2544 
2545   // CPUs which aren't M-class use a special sequence to return from
2546   // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
2547   // though we use "subs pc, lr, #N").
2548   //
2549   // M-class CPUs actually use a normal return sequence with a special
2550   // (hardware-provided) value in LR, so the normal code path works.
2551   if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
2552       !Subtarget->isMClass()) {
2553     if (Subtarget->isThumb1Only())
2554       report_fatal_error("interrupt attribute is not supported in Thumb1");
2555     return LowerInterruptReturn(RetOps, dl, DAG);
2556   }
2557 
2558   return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
2559 }
2560 
2561 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2562   if (N->getNumValues() != 1)
2563     return false;
2564   if (!N->hasNUsesOfValue(1, 0))
2565     return false;
2566 
2567   SDValue TCChain = Chain;
2568   SDNode *Copy = *N->use_begin();
2569   if (Copy->getOpcode() == ISD::CopyToReg) {
2570     // If the copy has a glue operand, we conservatively assume it isn't safe to
2571     // perform a tail call.
2572     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2573       return false;
2574     TCChain = Copy->getOperand(0);
2575   } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
2576     SDNode *VMov = Copy;
2577     // f64 returned in a pair of GPRs.
2578     SmallPtrSet<SDNode*, 2> Copies;
2579     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
2580          UI != UE; ++UI) {
2581       if (UI->getOpcode() != ISD::CopyToReg)
2582         return false;
2583       Copies.insert(*UI);
2584     }
2585     if (Copies.size() > 2)
2586       return false;
2587 
2588     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
2589          UI != UE; ++UI) {
2590       SDValue UseChain = UI->getOperand(0);
2591       if (Copies.count(UseChain.getNode()))
2592         // Second CopyToReg
2593         Copy = *UI;
2594       else {
2595         // We are at the top of this chain.
2596         // If the copy has a glue operand, we conservatively assume it
2597         // isn't safe to perform a tail call.
2598         if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
2599           return false;
2600         // First CopyToReg
2601         TCChain = UseChain;
2602       }
2603     }
2604   } else if (Copy->getOpcode() == ISD::BITCAST) {
2605     // f32 returned in a single GPR.
2606     if (!Copy->hasOneUse())
2607       return false;
2608     Copy = *Copy->use_begin();
2609     if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
2610       return false;
2611     // If the copy has a glue operand, we conservatively assume it isn't safe to
2612     // perform a tail call.
2613     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2614       return false;
2615     TCChain = Copy->getOperand(0);
2616   } else {
2617     return false;
2618   }
2619 
2620   bool HasRet = false;
2621   for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2622        UI != UE; ++UI) {
2623     if (UI->getOpcode() != ARMISD::RET_FLAG &&
2624         UI->getOpcode() != ARMISD::INTRET_FLAG)
2625       return false;
2626     HasRet = true;
2627   }
2628 
2629   if (!HasRet)
2630     return false;
2631 
2632   Chain = TCChain;
2633   return true;
2634 }
2635 
2636 bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2637   if (!Subtarget->supportsTailCall())
2638     return false;
2639 
2640   auto Attr =
2641       CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
2642   if (!CI->isTailCall() || Attr.getValueAsString() == "true")
2643     return false;
2644 
2645   return true;
2646 }
2647 
2648 // Trying to write a 64 bit value so need to split into two 32 bit values first,
2649 // and pass the lower and high parts through.
2650 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
2651   SDLoc DL(Op);
2652   SDValue WriteValue = Op->getOperand(2);
2653 
2654   // This function is only supposed to be called for i64 type argument.
2655   assert(WriteValue.getValueType() == MVT::i64
2656           && "LowerWRITE_REGISTER called for non-i64 type argument.");
2657 
2658   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
2659                            DAG.getConstant(0, DL, MVT::i32));
2660   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
2661                            DAG.getConstant(1, DL, MVT::i32));
2662   SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
2663   return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
2664 }
2665 
2666 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
2667 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
2668 // one of the above mentioned nodes. It has to be wrapped because otherwise
2669 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
2670 // be used to form addressing mode. These wrapped nodes will be selected
2671 // into MOVi.
2672 SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
2673                                              SelectionDAG &DAG) const {
2674   EVT PtrVT = Op.getValueType();
2675   // FIXME there is no actual debug info here
2676   SDLoc dl(Op);
2677   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2678   SDValue Res;
2679 
2680   // When generating execute-only code Constant Pools must be promoted to the
2681   // global data section. It's a bit ugly that we can't share them across basic
2682   // blocks, but this way we guarantee that execute-only behaves correct with
2683   // position-independent addressing modes.
2684   if (Subtarget->genExecuteOnly()) {
2685     auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
2686     auto T = const_cast<Type*>(CP->getType());
2687     auto C = const_cast<Constant*>(CP->getConstVal());
2688     auto M = const_cast<Module*>(DAG.getMachineFunction().
2689                                  getFunction()->getParent());
2690     auto GV = new GlobalVariable(
2691                     *M, T, /*isConst=*/true, GlobalVariable::InternalLinkage, C,
2692                     Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
2693                     Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" +
2694                     Twine(AFI->createPICLabelUId())
2695                   );
2696     SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV),
2697                                             dl, PtrVT);
2698     return LowerGlobalAddress(GA, DAG);
2699   }
2700 
2701   if (CP->isMachineConstantPoolEntry())
2702     Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
2703                                     CP->getAlignment());
2704   else
2705     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
2706                                     CP->getAlignment());
2707   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
2708 }
2709 
2710 unsigned ARMTargetLowering::getJumpTableEncoding() const {
2711   return MachineJumpTableInfo::EK_Inline;
2712 }
2713 
2714 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
2715                                              SelectionDAG &DAG) const {
2716   MachineFunction &MF = DAG.getMachineFunction();
2717   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2718   unsigned ARMPCLabelIndex = 0;
2719   SDLoc DL(Op);
2720   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2721   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2722   SDValue CPAddr;
2723   bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
2724   if (!IsPositionIndependent) {
2725     CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
2726   } else {
2727     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2728     ARMPCLabelIndex = AFI->createPICLabelUId();
2729     ARMConstantPoolValue *CPV =
2730       ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
2731                                       ARMCP::CPBlockAddress, PCAdj);
2732     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2733   }
2734   CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
2735   SDValue Result = DAG.getLoad(
2736       PtrVT, DL, DAG.getEntryNode(), CPAddr,
2737       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2738   if (!IsPositionIndependent)
2739     return Result;
2740   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
2741   return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
2742 }
2743 
2744 /// \brief Convert a TLS address reference into the correct sequence of loads
2745 /// and calls to compute the variable's address for Darwin, and return an
2746 /// SDValue containing the final node.
2747 
2748 /// Darwin only has one TLS scheme which must be capable of dealing with the
2749 /// fully general situation, in the worst case. This means:
2750 ///     + "extern __thread" declaration.
2751 ///     + Defined in a possibly unknown dynamic library.
2752 ///
2753 /// The general system is that each __thread variable has a [3 x i32] descriptor
2754 /// which contains information used by the runtime to calculate the address. The
2755 /// only part of this the compiler needs to know about is the first word, which
2756 /// contains a function pointer that must be called with the address of the
2757 /// entire descriptor in "r0".
2758 ///
2759 /// Since this descriptor may be in a different unit, in general access must
2760 /// proceed along the usual ARM rules. A common sequence to produce is:
2761 ///
2762 ///     movw rT1, :lower16:_var$non_lazy_ptr
2763 ///     movt rT1, :upper16:_var$non_lazy_ptr
2764 ///     ldr r0, [rT1]
2765 ///     ldr rT2, [r0]
2766 ///     blx rT2
2767 ///     [...address now in r0...]
2768 SDValue
2769 ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
2770                                                SelectionDAG &DAG) const {
2771   assert(Subtarget->isTargetDarwin() && "TLS only supported on Darwin");
2772   SDLoc DL(Op);
2773 
2774   // First step is to get the address of the actua global symbol. This is where
2775   // the TLS descriptor lives.
2776   SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
2777 
2778   // The first entry in the descriptor is a function pointer that we must call
2779   // to obtain the address of the variable.
2780   SDValue Chain = DAG.getEntryNode();
2781   SDValue FuncTLVGet = DAG.getLoad(
2782       MVT::i32, DL, Chain, DescAddr,
2783       MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2784       /* Alignment = */ 4,
2785       MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
2786           MachineMemOperand::MOInvariant);
2787   Chain = FuncTLVGet.getValue(1);
2788 
2789   MachineFunction &F = DAG.getMachineFunction();
2790   MachineFrameInfo &MFI = F.getFrameInfo();
2791   MFI.setAdjustsStack(true);
2792 
2793   // TLS calls preserve all registers except those that absolutely must be
2794   // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
2795   // silly).
2796   auto TRI =
2797       getTargetMachine().getSubtargetImpl(*F.getFunction())->getRegisterInfo();
2798   auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
2799   const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());
2800 
2801   // Finally, we can make the call. This is just a degenerate version of a
2802   // normal AArch64 call node: r0 takes the address of the descriptor, and
2803   // returns the address of the variable in this thread.
2804   Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
2805   Chain =
2806       DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
2807                   Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
2808                   DAG.getRegisterMask(Mask), Chain.getValue(1));
2809   return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
2810 }
2811 
2812 SDValue
2813 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
2814                                                 SelectionDAG &DAG) const {
2815   assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
2816 
2817   SDValue Chain = DAG.getEntryNode();
2818   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2819   SDLoc DL(Op);
2820 
2821   // Load the current TEB (thread environment block)
2822   SDValue Ops[] = {Chain,
2823                    DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
2824                    DAG.getConstant(15, DL, MVT::i32),
2825                    DAG.getConstant(0, DL, MVT::i32),
2826                    DAG.getConstant(13, DL, MVT::i32),
2827                    DAG.getConstant(0, DL, MVT::i32),
2828                    DAG.getConstant(2, DL, MVT::i32)};
2829   SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
2830                                    DAG.getVTList(MVT::i32, MVT::Other), Ops);
2831 
2832   SDValue TEB = CurrentTEB.getValue(0);
2833   Chain = CurrentTEB.getValue(1);
2834 
2835   // Load the ThreadLocalStoragePointer from the TEB
2836   // A pointer to the TLS array is located at offset 0x2c from the TEB.
2837   SDValue TLSArray =
2838       DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
2839   TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
2840 
2841   // The pointer to the thread's TLS data area is at the TLS Index scaled by 4
2842   // offset into the TLSArray.
2843 
2844   // Load the TLS index from the C runtime
2845   SDValue TLSIndex =
2846       DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
2847   TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
2848   TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());
2849 
2850   SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
2851                               DAG.getConstant(2, DL, MVT::i32));
2852   SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
2853                             DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
2854                             MachinePointerInfo());
2855 
2856   // Get the offset of the start of the .tls section (section base)
2857   const auto *GA = cast<GlobalAddressSDNode>(Op);
2858   auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
2859   SDValue Offset = DAG.getLoad(
2860       PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
2861                                     DAG.getTargetConstantPool(CPV, PtrVT, 4)),
2862       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2863 
2864   return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
2865 }
2866 
2867 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
2868 SDValue
2869 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
2870                                                  SelectionDAG &DAG) const {
2871   SDLoc dl(GA);
2872   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2873   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
2874   MachineFunction &MF = DAG.getMachineFunction();
2875   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2876   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2877   ARMConstantPoolValue *CPV =
2878     ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
2879                                     ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
2880   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2881   Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
2882   Argument = DAG.getLoad(
2883       PtrVT, dl, DAG.getEntryNode(), Argument,
2884       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2885   SDValue Chain = Argument.getValue(1);
2886 
2887   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2888   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
2889 
2890   // call __tls_get_addr.
2891   ArgListTy Args;
2892   ArgListEntry Entry;
2893   Entry.Node = Argument;
2894   Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
2895   Args.push_back(Entry);
2896 
2897   // FIXME: is there useful debug info available here?
2898   TargetLowering::CallLoweringInfo CLI(DAG);
2899   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
2900       CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
2901       DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));
2902 
2903   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2904   return CallResult.first;
2905 }
2906 
2907 // Lower ISD::GlobalTLSAddress using the "initial exec" or
2908 // "local exec" model.
2909 SDValue
2910 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
2911                                         SelectionDAG &DAG,
2912                                         TLSModel::Model model) const {
2913   const GlobalValue *GV = GA->getGlobal();
2914   SDLoc dl(GA);
2915   SDValue Offset;
2916   SDValue Chain = DAG.getEntryNode();
2917   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2918   // Get the Thread Pointer
2919   SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
2920 
2921   if (model == TLSModel::InitialExec) {
2922     MachineFunction &MF = DAG.getMachineFunction();
2923     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2924     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2925     // Initial exec model.
2926     unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
2927     ARMConstantPoolValue *CPV =
2928       ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
2929                                       ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
2930                                       true);
2931     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2932     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
2933     Offset = DAG.getLoad(
2934         PtrVT, dl, Chain, Offset,
2935         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2936     Chain = Offset.getValue(1);
2937 
2938     SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2939     Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
2940 
2941     Offset = DAG.getLoad(
2942         PtrVT, dl, Chain, Offset,
2943         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2944   } else {
2945     // local exec model
2946     assert(model == TLSModel::LocalExec);
2947     ARMConstantPoolValue *CPV =
2948       ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
2949     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2950     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
2951     Offset = DAG.getLoad(
2952         PtrVT, dl, Chain, Offset,
2953         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2954   }
2955 
2956   // The address of the thread local variable is the add of the thread
2957   // pointer with the offset of the variable.
2958   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
2959 }
2960 
2961 SDValue
2962 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
2963   if (Subtarget->isTargetDarwin())
2964     return LowerGlobalTLSAddressDarwin(Op, DAG);
2965 
2966   if (Subtarget->isTargetWindows())
2967     return LowerGlobalTLSAddressWindows(Op, DAG);
2968 
2969   // TODO: implement the "local dynamic" model
2970   assert(Subtarget->isTargetELF() && "Only ELF implemented here");
2971   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2972   if (DAG.getTarget().Options.EmulatedTLS)
2973     return LowerToTLSEmulatedModel(GA, DAG);
2974 
2975   TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
2976 
2977   switch (model) {
2978     case TLSModel::GeneralDynamic:
2979     case TLSModel::LocalDynamic:
2980       return LowerToTLSGeneralDynamicModel(GA, DAG);
2981     case TLSModel::InitialExec:
2982     case TLSModel::LocalExec:
2983       return LowerToTLSExecModels(GA, DAG, model);
2984   }
2985   llvm_unreachable("bogus TLS model");
2986 }
2987 
2988 /// Return true if all users of V are within function F, looking through
2989 /// ConstantExprs.
2990 static bool allUsersAreInFunction(const Value *V, const Function *F) {
2991   SmallVector<const User*,4> Worklist;
2992   for (auto *U : V->users())
2993     Worklist.push_back(U);
2994   while (!Worklist.empty()) {
2995     auto *U = Worklist.pop_back_val();
2996     if (isa<ConstantExpr>(U)) {
2997       for (auto *UU : U->users())
2998         Worklist.push_back(UU);
2999       continue;
3000     }
3001 
3002     auto *I = dyn_cast<Instruction>(U);
3003     if (!I || I->getParent()->getParent() != F)
3004       return false;
3005   }
3006   return true;
3007 }
3008 
3009 /// Return true if all users of V are within some (any) function, looking through
3010 /// ConstantExprs. In other words, are there any global constant users?
3011 static bool allUsersAreInFunctions(const Value *V) {
3012   SmallVector<const User*,4> Worklist;
3013   for (auto *U : V->users())
3014     Worklist.push_back(U);
3015   while (!Worklist.empty()) {
3016     auto *U = Worklist.pop_back_val();
3017     if (isa<ConstantExpr>(U)) {
3018       for (auto *UU : U->users())
3019         Worklist.push_back(UU);
3020       continue;
3021     }
3022 
3023     if (!isa<Instruction>(U))
3024       return false;
3025   }
3026   return true;
3027 }
3028 
3029 // Return true if T is an integer, float or an array/vector of either.
3030 static bool isSimpleType(Type *T) {
3031   if (T->isIntegerTy() || T->isFloatingPointTy())
3032     return true;
3033   Type *SubT = nullptr;
3034   if (T->isArrayTy())
3035     SubT = T->getArrayElementType();
3036   else if (T->isVectorTy())
3037     SubT = T->getVectorElementType();
3038   else
3039     return false;
3040   return SubT->isIntegerTy() || SubT->isFloatingPointTy();
3041 }
3042 
3043 static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG,
3044                                      EVT PtrVT, const SDLoc &dl) {
3045   // If we're creating a pool entry for a constant global with unnamed address,
3046   // and the global is small enough, we can emit it inline into the constant pool
3047   // to save ourselves an indirection.
3048   //
3049   // This is a win if the constant is only used in one function (so it doesn't
3050   // need to be duplicated) or duplicating the constant wouldn't increase code
3051   // size (implying the constant is no larger than 4 bytes).
3052   const Function *F = DAG.getMachineFunction().getFunction();
3053 
3054   // We rely on this decision to inline being idemopotent and unrelated to the
3055   // use-site. We know that if we inline a variable at one use site, we'll
3056   // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
3057   // doesn't know about this optimization, so bail out if it's enabled else
3058   // we could decide to inline here (and thus never emit the GV) but require
3059   // the GV from fast-isel generated code.
3060   if (!EnableConstpoolPromotion ||
3061       DAG.getMachineFunction().getTarget().Options.EnableFastISel)
3062       return SDValue();
3063 
3064   auto *GVar = dyn_cast<GlobalVariable>(GV);
3065   if (!GVar || !GVar->hasInitializer() ||
3066       !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3067       !GVar->hasLocalLinkage())
3068     return SDValue();
3069 
3070   // Ensure that we don't try and inline any type that contains pointers. If
3071   // we inline a value that contains relocations, we move the relocations from
3072   // .data to .text which is not ideal.
3073   auto *Init = GVar->getInitializer();
3074   if (!isSimpleType(Init->getType()))
3075     return SDValue();
3076 
3077   // The constant islands pass can only really deal with alignment requests
3078   // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
3079   // any type wanting greater alignment requirements than 4 bytes. We also
3080   // can only promote constants that are multiples of 4 bytes in size or
3081   // are paddable to a multiple of 4. Currently we only try and pad constants
3082   // that are strings for simplicity.
3083   auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
3084   unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
3085   unsigned Align = GVar->getAlignment();
3086   unsigned RequiredPadding = 4 - (Size % 4);
3087   bool PaddingPossible =
3088     RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3089   if (!PaddingPossible || Align > 4 || Size > ConstpoolPromotionMaxSize ||
3090       Size == 0)
3091     return SDValue();
3092 
3093   unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3094   MachineFunction &MF = DAG.getMachineFunction();
3095   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3096 
3097   // We can't bloat the constant pool too much, else the ConstantIslands pass
3098   // may fail to converge. If we haven't promoted this global yet (it may have
3099   // multiple uses), and promoting it would increase the constant pool size (Sz
3100   // > 4), ensure we have space to do so up to MaxTotal.
3101   if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
3102     if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
3103         ConstpoolPromotionMaxTotal)
3104       return SDValue();
3105 
3106   // This is only valid if all users are in a single function OR it has users
3107   // in multiple functions but it no larger than a pointer. We also check if
3108   // GVar has constant (non-ConstantExpr) users. If so, it essentially has its
3109   // address taken.
3110   if (!allUsersAreInFunction(GVar, F) &&
3111       !(Size <= 4 && allUsersAreInFunctions(GVar)))
3112     return SDValue();
3113 
3114   // We're going to inline this global. Pad it out if needed.
3115   if (RequiredPadding != 4) {
3116     StringRef S = CDAInit->getAsString();
3117 
3118     SmallVector<uint8_t,16> V(S.size());
3119     std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
3120     while (RequiredPadding--)
3121       V.push_back(0);
3122     Init = ConstantDataArray::get(*DAG.getContext(), V);
3123   }
3124 
3125   auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
3126   SDValue CPAddr =
3127     DAG.getTargetConstantPool(CPVal, PtrVT, /*Align=*/4);
3128   if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
3129     AFI->markGlobalAsPromotedToConstantPool(GVar);
3130     AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
3131                                       PaddedSize - 4);
3132   }
3133   ++NumConstpoolPromoted;
3134   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3135 }
3136 
3137 static bool isReadOnly(const GlobalValue *GV) {
3138   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3139     GV = GA->getBaseObject();
3140   return (isa<GlobalVariable>(GV) && cast<GlobalVariable>(GV)->isConstant()) ||
3141          isa<Function>(GV);
3142 }
3143 
3144 SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
3145                                               SelectionDAG &DAG) const {
3146   switch (Subtarget->getTargetTriple().getObjectFormat()) {
3147   default: llvm_unreachable("unknown object format");
3148   case Triple::COFF:
3149     return LowerGlobalAddressWindows(Op, DAG);
3150   case Triple::ELF:
3151     return LowerGlobalAddressELF(Op, DAG);
3152   case Triple::MachO:
3153     return LowerGlobalAddressDarwin(Op, DAG);
3154   }
3155 }
3156 
3157 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
3158                                                  SelectionDAG &DAG) const {
3159   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3160   SDLoc dl(Op);
3161   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3162   const TargetMachine &TM = getTargetMachine();
3163   bool IsRO = isReadOnly(GV);
3164 
3165   // promoteToConstantPool only if not generating XO text section
3166   if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
3167     if (SDValue V = promoteToConstantPool(GV, DAG, PtrVT, dl))
3168       return V;
3169 
3170   if (isPositionIndependent()) {
3171     bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
3172 
3173     MachineFunction &MF = DAG.getMachineFunction();
3174     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3175     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3176     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3177     SDLoc dl(Op);
3178     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
3179     ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
3180         GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
3181         UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
3182         /*AddCurrentAddress=*/UseGOT_PREL);
3183     SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3184     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3185     SDValue Result = DAG.getLoad(
3186         PtrVT, dl, DAG.getEntryNode(), CPAddr,
3187         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3188     SDValue Chain = Result.getValue(1);
3189     SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3190     Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
3191     if (UseGOT_PREL)
3192       Result =
3193           DAG.getLoad(PtrVT, dl, Chain, Result,
3194                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3195     return Result;
3196   } else if (Subtarget->isROPI() && IsRO) {
3197     // PC-relative.
3198     SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
3199     SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3200     return Result;
3201   } else if (Subtarget->isRWPI() && !IsRO) {
3202     // SB-relative.
3203     SDValue RelAddr;
3204     if (Subtarget->useMovt(DAG.getMachineFunction())) {
3205       ++NumMovwMovt;
3206       SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL);
3207       RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G);
3208     } else { // use literal pool for address constant
3209       ARMConstantPoolValue *CPV =
3210         ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
3211       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3212       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3213       RelAddr = DAG.getLoad(
3214           PtrVT, dl, DAG.getEntryNode(), CPAddr,
3215           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3216     }
3217     SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
3218     SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr);
3219     return Result;
3220   }
3221 
3222   // If we have T2 ops, we can materialize the address directly via movt/movw
3223   // pair. This is always cheaper.
3224   if (Subtarget->useMovt(DAG.getMachineFunction())) {
3225     ++NumMovwMovt;
3226     // FIXME: Once remat is capable of dealing with instructions with register
3227     // operands, expand this into two nodes.
3228     return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
3229                        DAG.getTargetGlobalAddress(GV, dl, PtrVT));
3230   } else {
3231     SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
3232     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3233     return DAG.getLoad(
3234         PtrVT, dl, DAG.getEntryNode(), CPAddr,
3235         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3236   }
3237 }
3238 
3239 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
3240                                                     SelectionDAG &DAG) const {
3241   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3242          "ROPI/RWPI not currently supported for Darwin");
3243   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3244   SDLoc dl(Op);
3245   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3246 
3247   if (Subtarget->useMovt(DAG.getMachineFunction()))
3248     ++NumMovwMovt;
3249 
3250   // FIXME: Once remat is capable of dealing with instructions with register
3251   // operands, expand this into multiple nodes
3252   unsigned Wrapper =
3253       isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;
3254 
3255   SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
3256   SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
3257 
3258   if (Subtarget->isGVIndirectSymbol(GV))
3259     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3260                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3261   return Result;
3262 }
3263 
3264 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
3265                                                      SelectionDAG &DAG) const {
3266   assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
3267   assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
3268          "Windows on ARM expects to use movw/movt");
3269   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3270          "ROPI/RWPI not currently supported for Windows");
3271 
3272   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3273   const ARMII::TOF TargetFlags =
3274     (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG);
3275   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3276   SDValue Result;
3277   SDLoc DL(Op);
3278 
3279   ++NumMovwMovt;
3280 
3281   // FIXME: Once remat is capable of dealing with instructions with register
3282   // operands, expand this into two nodes.
3283   Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
3284                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
3285                                                   TargetFlags));
3286   if (GV->hasDLLImportStorageClass())
3287     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
3288                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3289   return Result;
3290 }
3291 
3292 SDValue
3293 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
3294   SDLoc dl(Op);
3295   SDValue Val = DAG.getConstant(0, dl, MVT::i32);
3296   return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
3297                      DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
3298                      Op.getOperand(1), Val);
3299 }
3300 
3301 SDValue
3302 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
3303   SDLoc dl(Op);
3304   return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
3305                      Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
3306 }
3307 
3308 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
3309                                                       SelectionDAG &DAG) const {
3310   SDLoc dl(Op);
3311   return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
3312                      Op.getOperand(0));
3313 }
3314 
3315 SDValue
3316 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
3317                                           const ARMSubtarget *Subtarget) const {
3318   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3319   SDLoc dl(Op);
3320   switch (IntNo) {
3321   default: return SDValue();    // Don't custom lower most intrinsics.
3322   case Intrinsic::thread_pointer: {
3323     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3324     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
3325   }
3326   case Intrinsic::eh_sjlj_lsda: {
3327     MachineFunction &MF = DAG.getMachineFunction();
3328     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3329     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3330     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3331     SDValue CPAddr;
3332     bool IsPositionIndependent = isPositionIndependent();
3333     unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
3334     ARMConstantPoolValue *CPV =
3335       ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
3336                                       ARMCP::CPLSDA, PCAdj);
3337     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3338     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3339     SDValue Result = DAG.getLoad(
3340         PtrVT, dl, DAG.getEntryNode(), CPAddr,
3341         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3342 
3343     if (IsPositionIndependent) {
3344       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3345       Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
3346     }
3347     return Result;
3348   }
3349   case Intrinsic::arm_neon_vabs:
3350     return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
3351                         Op.getOperand(1));
3352   case Intrinsic::arm_neon_vmulls:
3353   case Intrinsic::arm_neon_vmullu: {
3354     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
3355       ? ARMISD::VMULLs : ARMISD::VMULLu;
3356     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3357                        Op.getOperand(1), Op.getOperand(2));
3358   }
3359   case Intrinsic::arm_neon_vminnm:
3360   case Intrinsic::arm_neon_vmaxnm: {
3361     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
3362       ? ISD::FMINNUM : ISD::FMAXNUM;
3363     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3364                        Op.getOperand(1), Op.getOperand(2));
3365   }
3366   case Intrinsic::arm_neon_vminu:
3367   case Intrinsic::arm_neon_vmaxu: {
3368     if (Op.getValueType().isFloatingPoint())
3369       return SDValue();
3370     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
3371       ? ISD::UMIN : ISD::UMAX;
3372     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3373                          Op.getOperand(1), Op.getOperand(2));
3374   }
3375   case Intrinsic::arm_neon_vmins:
3376   case Intrinsic::arm_neon_vmaxs: {
3377     // v{min,max}s is overloaded between signed integers and floats.
3378     if (!Op.getValueType().isFloatingPoint()) {
3379       unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3380         ? ISD::SMIN : ISD::SMAX;
3381       return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3382                          Op.getOperand(1), Op.getOperand(2));
3383     }
3384     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3385       ? ISD::FMINNAN : ISD::FMAXNAN;
3386     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3387                        Op.getOperand(1), Op.getOperand(2));
3388   }
3389   case Intrinsic::arm_neon_vtbl1:
3390     return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
3391                        Op.getOperand(1), Op.getOperand(2));
3392   case Intrinsic::arm_neon_vtbl2:
3393     return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
3394                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3395   }
3396 }
3397 
3398 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
3399                                  const ARMSubtarget *Subtarget) {
3400   SDLoc dl(Op);
3401   ConstantSDNode *ScopeN = cast<ConstantSDNode>(Op.getOperand(2));
3402   auto Scope = static_cast<SynchronizationScope>(ScopeN->getZExtValue());
3403   if (Scope == SynchronizationScope::SingleThread)
3404     return Op;
3405 
3406   if (!Subtarget->hasDataBarrier()) {
3407     // Some ARMv6 cpus can support data barriers with an mcr instruction.
3408     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
3409     // here.
3410     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
3411            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
3412     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
3413                        DAG.getConstant(0, dl, MVT::i32));
3414   }
3415 
3416   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
3417   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
3418   ARM_MB::MemBOpt Domain = ARM_MB::ISH;
3419   if (Subtarget->isMClass()) {
3420     // Only a full system barrier exists in the M-class architectures.
3421     Domain = ARM_MB::SY;
3422   } else if (Subtarget->preferISHSTBarriers() &&
3423              Ord == AtomicOrdering::Release) {
3424     // Swift happens to implement ISHST barriers in a way that's compatible with
3425     // Release semantics but weaker than ISH so we'd be fools not to use
3426     // it. Beware: other processors probably don't!
3427     Domain = ARM_MB::ISHST;
3428   }
3429 
3430   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
3431                      DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
3432                      DAG.getConstant(Domain, dl, MVT::i32));
3433 }
3434 
3435 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
3436                              const ARMSubtarget *Subtarget) {
3437   // ARM pre v5TE and Thumb1 does not have preload instructions.
3438   if (!(Subtarget->isThumb2() ||
3439         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
3440     // Just preserve the chain.
3441     return Op.getOperand(0);
3442 
3443   SDLoc dl(Op);
3444   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
3445   if (!isRead &&
3446       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
3447     // ARMv7 with MP extension has PLDW.
3448     return Op.getOperand(0);
3449 
3450   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3451   if (Subtarget->isThumb()) {
3452     // Invert the bits.
3453     isRead = ~isRead & 1;
3454     isData = ~isData & 1;
3455   }
3456 
3457   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
3458                      Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
3459                      DAG.getConstant(isData, dl, MVT::i32));
3460 }
3461 
3462 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
3463   MachineFunction &MF = DAG.getMachineFunction();
3464   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
3465 
3466   // vastart just stores the address of the VarArgsFrameIndex slot into the
3467   // memory location argument.
3468   SDLoc dl(Op);
3469   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
3470   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3471   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3472   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3473                       MachinePointerInfo(SV));
3474 }
3475 
3476 SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
3477                                                 CCValAssign &NextVA,
3478                                                 SDValue &Root,
3479                                                 SelectionDAG &DAG,
3480                                                 const SDLoc &dl) const {
3481   MachineFunction &MF = DAG.getMachineFunction();
3482   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3483 
3484   const TargetRegisterClass *RC;
3485   if (AFI->isThumb1OnlyFunction())
3486     RC = &ARM::tGPRRegClass;
3487   else
3488     RC = &ARM::GPRRegClass;
3489 
3490   // Transform the arguments stored in physical registers into virtual ones.
3491   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3492   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3493 
3494   SDValue ArgValue2;
3495   if (NextVA.isMemLoc()) {
3496     MachineFrameInfo &MFI = MF.getFrameInfo();
3497     int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);
3498 
3499     // Create load node to retrieve arguments from the stack.
3500     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3501     ArgValue2 = DAG.getLoad(
3502         MVT::i32, dl, Root, FIN,
3503         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3504   } else {
3505     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
3506     ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3507   }
3508   if (!Subtarget->isLittle())
3509     std::swap (ArgValue, ArgValue2);
3510   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
3511 }
3512 
3513 // The remaining GPRs hold either the beginning of variable-argument
3514 // data, or the beginning of an aggregate passed by value (usually
3515 // byval).  Either way, we allocate stack slots adjacent to the data
3516 // provided by our caller, and store the unallocated registers there.
3517 // If this is a variadic function, the va_list pointer will begin with
3518 // these values; otherwise, this reassembles a (byval) structure that
3519 // was split between registers and memory.
3520 // Return: The frame index registers were stored into.
3521 int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
3522                                       const SDLoc &dl, SDValue &Chain,
3523                                       const Value *OrigArg,
3524                                       unsigned InRegsParamRecordIdx,
3525                                       int ArgOffset, unsigned ArgSize) const {
3526   // Currently, two use-cases possible:
3527   // Case #1. Non-var-args function, and we meet first byval parameter.
3528   //          Setup first unallocated register as first byval register;
3529   //          eat all remained registers
3530   //          (these two actions are performed by HandleByVal method).
3531   //          Then, here, we initialize stack frame with
3532   //          "store-reg" instructions.
3533   // Case #2. Var-args function, that doesn't contain byval parameters.
3534   //          The same: eat all remained unallocated registers,
3535   //          initialize stack frame.
3536 
3537   MachineFunction &MF = DAG.getMachineFunction();
3538   MachineFrameInfo &MFI = MF.getFrameInfo();
3539   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3540   unsigned RBegin, REnd;
3541   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
3542     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
3543   } else {
3544     unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
3545     RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
3546     REnd = ARM::R4;
3547   }
3548 
3549   if (REnd != RBegin)
3550     ArgOffset = -4 * (ARM::R4 - RBegin);
3551 
3552   auto PtrVT = getPointerTy(DAG.getDataLayout());
3553   int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
3554   SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);
3555 
3556   SmallVector<SDValue, 4> MemOps;
3557   const TargetRegisterClass *RC =
3558       AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
3559 
3560   for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
3561     unsigned VReg = MF.addLiveIn(Reg, RC);
3562     SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
3563     SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3564                                  MachinePointerInfo(OrigArg, 4 * i));
3565     MemOps.push_back(Store);
3566     FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
3567   }
3568 
3569   if (!MemOps.empty())
3570     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3571   return FrameIndex;
3572 }
3573 
3574 // Setup stack frame, the va_list pointer will start from.
3575 void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
3576                                              const SDLoc &dl, SDValue &Chain,
3577                                              unsigned ArgOffset,
3578                                              unsigned TotalArgRegsSaveSize,
3579                                              bool ForceMutable) const {
3580   MachineFunction &MF = DAG.getMachineFunction();
3581   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3582 
3583   // Try to store any remaining integer argument regs
3584   // to their spots on the stack so that they may be loaded by dereferencing
3585   // the result of va_next.
3586   // If there is no regs to be stored, just point address after last
3587   // argument passed via stack.
3588   int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
3589                                   CCInfo.getInRegsParamsCount(),
3590                                   CCInfo.getNextStackOffset(), 4);
3591   AFI->setVarArgsFrameIndex(FrameIndex);
3592 }
3593 
3594 SDValue ARMTargetLowering::LowerFormalArguments(
3595     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3596     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3597     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3598   MachineFunction &MF = DAG.getMachineFunction();
3599   MachineFrameInfo &MFI = MF.getFrameInfo();
3600 
3601   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3602 
3603   // Assign locations to all of the incoming arguments.
3604   SmallVector<CCValAssign, 16> ArgLocs;
3605   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3606                  *DAG.getContext());
3607   CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
3608 
3609   SmallVector<SDValue, 16> ArgValues;
3610   SDValue ArgValue;
3611   Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
3612   unsigned CurArgIdx = 0;
3613 
3614   // Initially ArgRegsSaveSize is zero.
3615   // Then we increase this value each time we meet byval parameter.
3616   // We also increase this value in case of varargs function.
3617   AFI->setArgRegsSaveSize(0);
3618 
3619   // Calculate the amount of stack space that we need to allocate to store
3620   // byval and variadic arguments that are passed in registers.
3621   // We need to know this before we allocate the first byval or variadic
3622   // argument, as they will be allocated a stack slot below the CFA (Canonical
3623   // Frame Address, the stack pointer at entry to the function).
3624   unsigned ArgRegBegin = ARM::R4;
3625   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3626     if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
3627       break;
3628 
3629     CCValAssign &VA = ArgLocs[i];
3630     unsigned Index = VA.getValNo();
3631     ISD::ArgFlagsTy Flags = Ins[Index].Flags;
3632     if (!Flags.isByVal())
3633       continue;
3634 
3635     assert(VA.isMemLoc() && "unexpected byval pointer in reg");
3636     unsigned RBegin, REnd;
3637     CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
3638     ArgRegBegin = std::min(ArgRegBegin, RBegin);
3639 
3640     CCInfo.nextInRegsParam();
3641   }
3642   CCInfo.rewindByValRegsInfo();
3643 
3644   int lastInsIndex = -1;
3645   if (isVarArg && MFI.hasVAStart()) {
3646     unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
3647     if (RegIdx != array_lengthof(GPRArgRegs))
3648       ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
3649   }
3650 
3651   unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
3652   AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
3653   auto PtrVT = getPointerTy(DAG.getDataLayout());
3654 
3655   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3656     CCValAssign &VA = ArgLocs[i];
3657     if (Ins[VA.getValNo()].isOrigArg()) {
3658       std::advance(CurOrigArg,
3659                    Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
3660       CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
3661     }
3662     // Arguments stored in registers.
3663     if (VA.isRegLoc()) {
3664       EVT RegVT = VA.getLocVT();
3665 
3666       if (VA.needsCustom()) {
3667         // f64 and vector types are split up into multiple registers or
3668         // combinations of registers and stack slots.
3669         if (VA.getLocVT() == MVT::v2f64) {
3670           SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
3671                                                    Chain, DAG, dl);
3672           VA = ArgLocs[++i]; // skip ahead to next loc
3673           SDValue ArgValue2;
3674           if (VA.isMemLoc()) {
3675             int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
3676             SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3677             ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
3678                                     MachinePointerInfo::getFixedStack(
3679                                         DAG.getMachineFunction(), FI));
3680           } else {
3681             ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
3682                                              Chain, DAG, dl);
3683           }
3684           ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
3685           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3686                                  ArgValue, ArgValue1,
3687                                  DAG.getIntPtrConstant(0, dl));
3688           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3689                                  ArgValue, ArgValue2,
3690                                  DAG.getIntPtrConstant(1, dl));
3691         } else
3692           ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
3693 
3694       } else {
3695         const TargetRegisterClass *RC;
3696 
3697         if (RegVT == MVT::f32)
3698           RC = &ARM::SPRRegClass;
3699         else if (RegVT == MVT::f64)
3700           RC = &ARM::DPRRegClass;
3701         else if (RegVT == MVT::v2f64)
3702           RC = &ARM::QPRRegClass;
3703         else if (RegVT == MVT::i32)
3704           RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
3705                                            : &ARM::GPRRegClass;
3706         else
3707           llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
3708 
3709         // Transform the arguments in physical registers into virtual ones.
3710         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3711         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3712       }
3713 
3714       // If this is an 8 or 16-bit value, it is really passed promoted
3715       // to 32 bits.  Insert an assert[sz]ext to capture this, then
3716       // truncate to the right size.
3717       switch (VA.getLocInfo()) {
3718       default: llvm_unreachable("Unknown loc info!");
3719       case CCValAssign::Full: break;
3720       case CCValAssign::BCvt:
3721         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
3722         break;
3723       case CCValAssign::SExt:
3724         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3725                                DAG.getValueType(VA.getValVT()));
3726         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3727         break;
3728       case CCValAssign::ZExt:
3729         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3730                                DAG.getValueType(VA.getValVT()));
3731         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3732         break;
3733       }
3734 
3735       InVals.push_back(ArgValue);
3736 
3737     } else { // VA.isRegLoc()
3738       // sanity check
3739       assert(VA.isMemLoc());
3740       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
3741 
3742       int index = VA.getValNo();
3743 
3744       // Some Ins[] entries become multiple ArgLoc[] entries.
3745       // Process them only once.
3746       if (index != lastInsIndex)
3747         {
3748           ISD::ArgFlagsTy Flags = Ins[index].Flags;
3749           // FIXME: For now, all byval parameter objects are marked mutable.
3750           // This can be changed with more analysis.
3751           // In case of tail call optimization mark all arguments mutable.
3752           // Since they could be overwritten by lowering of arguments in case of
3753           // a tail call.
3754           if (Flags.isByVal()) {
3755             assert(Ins[index].isOrigArg() &&
3756                    "Byval arguments cannot be implicit");
3757             unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
3758 
3759             int FrameIndex = StoreByValRegs(
3760                 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
3761                 VA.getLocMemOffset(), Flags.getByValSize());
3762             InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
3763             CCInfo.nextInRegsParam();
3764           } else {
3765             unsigned FIOffset = VA.getLocMemOffset();
3766             int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
3767                                            FIOffset, true);
3768 
3769             // Create load nodes to retrieve arguments from the stack.
3770             SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3771             InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
3772                                          MachinePointerInfo::getFixedStack(
3773                                              DAG.getMachineFunction(), FI)));
3774           }
3775           lastInsIndex = index;
3776         }
3777     }
3778   }
3779 
3780   // varargs
3781   if (isVarArg && MFI.hasVAStart())
3782     VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
3783                          CCInfo.getNextStackOffset(),
3784                          TotalArgRegsSaveSize);
3785 
3786   AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
3787 
3788   return Chain;
3789 }
3790 
3791 /// isFloatingPointZero - Return true if this is +0.0.
3792 static bool isFloatingPointZero(SDValue Op) {
3793   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
3794     return CFP->getValueAPF().isPosZero();
3795   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
3796     // Maybe this has already been legalized into the constant pool?
3797     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
3798       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
3799       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
3800         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
3801           return CFP->getValueAPF().isPosZero();
3802     }
3803   } else if (Op->getOpcode() == ISD::BITCAST &&
3804              Op->getValueType(0) == MVT::f64) {
3805     // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
3806     // created by LowerConstantFP().
3807     SDValue BitcastOp = Op->getOperand(0);
3808     if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
3809         isNullConstant(BitcastOp->getOperand(0)))
3810       return true;
3811   }
3812   return false;
3813 }
3814 
3815 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
3816 /// the given operands.
3817 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
3818                                      SDValue &ARMcc, SelectionDAG &DAG,
3819                                      const SDLoc &dl) const {
3820   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
3821     unsigned C = RHSC->getZExtValue();
3822     if (!isLegalICmpImmediate(C)) {
3823       // Constant does not fit, try adjusting it by one?
3824       switch (CC) {
3825       default: break;
3826       case ISD::SETLT:
3827       case ISD::SETGE:
3828         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
3829           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
3830           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
3831         }
3832         break;
3833       case ISD::SETULT:
3834       case ISD::SETUGE:
3835         if (C != 0 && isLegalICmpImmediate(C-1)) {
3836           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
3837           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
3838         }
3839         break;
3840       case ISD::SETLE:
3841       case ISD::SETGT:
3842         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
3843           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
3844           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
3845         }
3846         break;
3847       case ISD::SETULE:
3848       case ISD::SETUGT:
3849         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
3850           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
3851           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
3852         }
3853         break;
3854       }
3855     }
3856   }
3857 
3858   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
3859   ARMISD::NodeType CompareType;
3860   switch (CondCode) {
3861   default:
3862     CompareType = ARMISD::CMP;
3863     break;
3864   case ARMCC::EQ:
3865   case ARMCC::NE:
3866     // Uses only Z Flag
3867     CompareType = ARMISD::CMPZ;
3868     break;
3869   }
3870   ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
3871   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
3872 }
3873 
3874 /// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
3875 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
3876                                      SelectionDAG &DAG, const SDLoc &dl,
3877                                      bool InvalidOnQNaN) const {
3878   assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
3879   SDValue Cmp;
3880   SDValue C = DAG.getConstant(InvalidOnQNaN, dl, MVT::i32);
3881   if (!isFloatingPointZero(RHS))
3882     Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS, C);
3883   else
3884     Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS, C);
3885   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
3886 }
3887 
3888 /// duplicateCmp - Glue values can have only one use, so this function
3889 /// duplicates a comparison node.
3890 SDValue
3891 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
3892   unsigned Opc = Cmp.getOpcode();
3893   SDLoc DL(Cmp);
3894   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
3895     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
3896 
3897   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
3898   Cmp = Cmp.getOperand(0);
3899   Opc = Cmp.getOpcode();
3900   if (Opc == ARMISD::CMPFP)
3901     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),
3902                       Cmp.getOperand(1), Cmp.getOperand(2));
3903   else {
3904     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
3905     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),
3906                       Cmp.getOperand(1));
3907   }
3908   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
3909 }
3910 
3911 std::pair<SDValue, SDValue>
3912 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
3913                                  SDValue &ARMcc) const {
3914   assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");
3915 
3916   SDValue Value, OverflowCmp;
3917   SDValue LHS = Op.getOperand(0);
3918   SDValue RHS = Op.getOperand(1);
3919   SDLoc dl(Op);
3920 
3921   // FIXME: We are currently always generating CMPs because we don't support
3922   // generating CMN through the backend. This is not as good as the natural
3923   // CMP case because it causes a register dependency and cannot be folded
3924   // later.
3925 
3926   switch (Op.getOpcode()) {
3927   default:
3928     llvm_unreachable("Unknown overflow instruction!");
3929   case ISD::SADDO:
3930     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
3931     Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
3932     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
3933     break;
3934   case ISD::UADDO:
3935     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
3936     Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
3937     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
3938     break;
3939   case ISD::SSUBO:
3940     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
3941     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
3942     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
3943     break;
3944   case ISD::USUBO:
3945     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
3946     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
3947     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
3948     break;
3949   } // switch (...)
3950 
3951   return std::make_pair(Value, OverflowCmp);
3952 }
3953 
3954 SDValue
3955 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
3956   // Let legalize expand this if it isn't a legal type yet.
3957   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
3958     return SDValue();
3959 
3960   SDValue Value, OverflowCmp;
3961   SDValue ARMcc;
3962   std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
3963   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3964   SDLoc dl(Op);
3965   // We use 0 and 1 as false and true values.
3966   SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3967   SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3968   EVT VT = Op.getValueType();
3969 
3970   SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
3971                                  ARMcc, CCR, OverflowCmp);
3972 
3973   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
3974   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
3975 }
3976 
3977 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3978   SDValue Cond = Op.getOperand(0);
3979   SDValue SelectTrue = Op.getOperand(1);
3980   SDValue SelectFalse = Op.getOperand(2);
3981   SDLoc dl(Op);
3982   unsigned Opc = Cond.getOpcode();
3983 
3984   if (Cond.getResNo() == 1 &&
3985       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
3986        Opc == ISD::USUBO)) {
3987     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
3988       return SDValue();
3989 
3990     SDValue Value, OverflowCmp;
3991     SDValue ARMcc;
3992     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
3993     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3994     EVT VT = Op.getValueType();
3995 
3996     return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
3997                    OverflowCmp, DAG);
3998   }
3999 
4000   // Convert:
4001   //
4002   //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
4003   //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
4004   //
4005   if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
4006     const ConstantSDNode *CMOVTrue =
4007       dyn_cast<ConstantSDNode>(Cond.getOperand(0));
4008     const ConstantSDNode *CMOVFalse =
4009       dyn_cast<ConstantSDNode>(Cond.getOperand(1));
4010 
4011     if (CMOVTrue && CMOVFalse) {
4012       unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
4013       unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
4014 
4015       SDValue True;
4016       SDValue False;
4017       if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
4018         True = SelectTrue;
4019         False = SelectFalse;
4020       } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
4021         True = SelectFalse;
4022         False = SelectTrue;
4023       }
4024 
4025       if (True.getNode() && False.getNode()) {
4026         EVT VT = Op.getValueType();
4027         SDValue ARMcc = Cond.getOperand(2);
4028         SDValue CCR = Cond.getOperand(3);
4029         SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
4030         assert(True.getValueType() == VT);
4031         return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
4032       }
4033     }
4034   }
4035 
4036   // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
4037   // undefined bits before doing a full-word comparison with zero.
4038   Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
4039                      DAG.getConstant(1, dl, Cond.getValueType()));
4040 
4041   return DAG.getSelectCC(dl, Cond,
4042                          DAG.getConstant(0, dl, Cond.getValueType()),
4043                          SelectTrue, SelectFalse, ISD::SETNE);
4044 }
4045 
4046 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
4047                                  bool &swpCmpOps, bool &swpVselOps) {
4048   // Start by selecting the GE condition code for opcodes that return true for
4049   // 'equality'
4050   if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
4051       CC == ISD::SETULE)
4052     CondCode = ARMCC::GE;
4053 
4054   // and GT for opcodes that return false for 'equality'.
4055   else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
4056            CC == ISD::SETULT)
4057     CondCode = ARMCC::GT;
4058 
4059   // Since we are constrained to GE/GT, if the opcode contains 'less', we need
4060   // to swap the compare operands.
4061   if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
4062       CC == ISD::SETULT)
4063     swpCmpOps = true;
4064 
4065   // Both GT and GE are ordered comparisons, and return false for 'unordered'.
4066   // If we have an unordered opcode, we need to swap the operands to the VSEL
4067   // instruction (effectively negating the condition).
4068   //
4069   // This also has the effect of swapping which one of 'less' or 'greater'
4070   // returns true, so we also swap the compare operands. It also switches
4071   // whether we return true for 'equality', so we compensate by picking the
4072   // opposite condition code to our original choice.
4073   if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
4074       CC == ISD::SETUGT) {
4075     swpCmpOps = !swpCmpOps;
4076     swpVselOps = !swpVselOps;
4077     CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
4078   }
4079 
4080   // 'ordered' is 'anything but unordered', so use the VS condition code and
4081   // swap the VSEL operands.
4082   if (CC == ISD::SETO) {
4083     CondCode = ARMCC::VS;
4084     swpVselOps = true;
4085   }
4086 
4087   // 'unordered or not equal' is 'anything but equal', so use the EQ condition
4088   // code and swap the VSEL operands.
4089   if (CC == ISD::SETUNE) {
4090     CondCode = ARMCC::EQ;
4091     swpVselOps = true;
4092   }
4093 }
4094 
4095 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
4096                                    SDValue TrueVal, SDValue ARMcc, SDValue CCR,
4097                                    SDValue Cmp, SelectionDAG &DAG) const {
4098   if (Subtarget->isFPOnlySP() && VT == MVT::f64) {
4099     FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4100                            DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
4101     TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4102                           DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
4103 
4104     SDValue TrueLow = TrueVal.getValue(0);
4105     SDValue TrueHigh = TrueVal.getValue(1);
4106     SDValue FalseLow = FalseVal.getValue(0);
4107     SDValue FalseHigh = FalseVal.getValue(1);
4108 
4109     SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
4110                               ARMcc, CCR, Cmp);
4111     SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
4112                                ARMcc, CCR, duplicateCmp(Cmp, DAG));
4113 
4114     return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
4115   } else {
4116     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
4117                        Cmp);
4118   }
4119 }
4120 
4121 static bool isGTorGE(ISD::CondCode CC) {
4122   return CC == ISD::SETGT || CC == ISD::SETGE;
4123 }
4124 
4125 static bool isLTorLE(ISD::CondCode CC) {
4126   return CC == ISD::SETLT || CC == ISD::SETLE;
4127 }
4128 
4129 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
4130 // All of these conditions (and their <= and >= counterparts) will do:
4131 //          x < k ? k : x
4132 //          x > k ? x : k
4133 //          k < x ? x : k
4134 //          k > x ? k : x
4135 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
4136                             const SDValue TrueVal, const SDValue FalseVal,
4137                             const ISD::CondCode CC, const SDValue K) {
4138   return (isGTorGE(CC) &&
4139           ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4140          (isLTorLE(CC) &&
4141           ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4142 }
4143 
4144 // Similar to isLowerSaturate(), but checks for upper-saturating conditions.
4145 static bool isUpperSaturate(const SDValue LHS, const SDValue RHS,
4146                             const SDValue TrueVal, const SDValue FalseVal,
4147                             const ISD::CondCode CC, const SDValue K) {
4148   return (isGTorGE(CC) &&
4149           ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
4150          (isLTorLE(CC) &&
4151           ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
4152 }
4153 
4154 // Check if two chained conditionals could be converted into SSAT.
4155 //
4156 // SSAT can replace a set of two conditional selectors that bound a number to an
4157 // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples:
4158 //
4159 //     x < -k ? -k : (x > k ? k : x)
4160 //     x < -k ? -k : (x < k ? x : k)
4161 //     x > -k ? (x > k ? k : x) : -k
4162 //     x < k ? (x < -k ? -k : x) : k
4163 //     etc.
4164 //
4165 // It returns true if the conversion can be done, false otherwise.
4166 // Additionally, the variable is returned in parameter V and the constant in K.
4167 static bool isSaturatingConditional(const SDValue &Op, SDValue &V,
4168                                     uint64_t &K) {
4169   SDValue LHS1 = Op.getOperand(0);
4170   SDValue RHS1 = Op.getOperand(1);
4171   SDValue TrueVal1 = Op.getOperand(2);
4172   SDValue FalseVal1 = Op.getOperand(3);
4173   ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4174 
4175   const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
4176   if (Op2.getOpcode() != ISD::SELECT_CC)
4177     return false;
4178 
4179   SDValue LHS2 = Op2.getOperand(0);
4180   SDValue RHS2 = Op2.getOperand(1);
4181   SDValue TrueVal2 = Op2.getOperand(2);
4182   SDValue FalseVal2 = Op2.getOperand(3);
4183   ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();
4184 
4185   // Find out which are the constants and which are the variables
4186   // in each conditional
4187   SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
4188                                                         ? &RHS1
4189                                                         : nullptr;
4190   SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
4191                                                         ? &RHS2
4192                                                         : nullptr;
4193   SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
4194   SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
4195   SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
4196   SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
4197 
4198   // We must detect cases where the original operations worked with 16- or
4199   // 8-bit values. In such case, V2Tmp != V2 because the comparison operations
4200   // must work with sign-extended values but the select operations return
4201   // the original non-extended value.
4202   SDValue V2TmpReg = V2Tmp;
4203   if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG)
4204     V2TmpReg = V2Tmp->getOperand(0);
4205 
4206   // Check that the registers and the constants have the correct values
4207   // in both conditionals
4208   if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
4209       V2TmpReg != V2)
4210     return false;
4211 
4212   // Figure out which conditional is saturating the lower/upper bound.
4213   const SDValue *LowerCheckOp =
4214       isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
4215           ? &Op
4216           : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
4217                 ? &Op2
4218                 : nullptr;
4219   const SDValue *UpperCheckOp =
4220       isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
4221           ? &Op
4222           : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
4223                 ? &Op2
4224                 : nullptr;
4225 
4226   if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
4227     return false;
4228 
4229   // Check that the constant in the lower-bound check is
4230   // the opposite of the constant in the upper-bound check
4231   // in 1's complement.
4232   int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
4233   int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
4234   int64_t PosVal = std::max(Val1, Val2);
4235 
4236   if (((Val1 > Val2 && UpperCheckOp == &Op) ||
4237        (Val1 < Val2 && UpperCheckOp == &Op2)) &&
4238       Val1 == ~Val2 && isPowerOf2_64(PosVal + 1)) {
4239 
4240     V = V2;
4241     K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive
4242     return true;
4243   }
4244 
4245   return false;
4246 }
4247 
4248 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
4249   EVT VT = Op.getValueType();
4250   SDLoc dl(Op);
4251 
4252   // Try to convert two saturating conditional selects into a single SSAT
4253   SDValue SatValue;
4254   uint64_t SatConstant;
4255   if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) &&
4256       isSaturatingConditional(Op, SatValue, SatConstant))
4257     return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue,
4258                        DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
4259 
4260   SDValue LHS = Op.getOperand(0);
4261   SDValue RHS = Op.getOperand(1);
4262   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4263   SDValue TrueVal = Op.getOperand(2);
4264   SDValue FalseVal = Op.getOperand(3);
4265 
4266   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
4267     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
4268                                                     dl);
4269 
4270     // If softenSetCCOperands only returned one value, we should compare it to
4271     // zero.
4272     if (!RHS.getNode()) {
4273       RHS = DAG.getConstant(0, dl, LHS.getValueType());
4274       CC = ISD::SETNE;
4275     }
4276   }
4277 
4278   if (LHS.getValueType() == MVT::i32) {
4279     // Try to generate VSEL on ARMv8.
4280     // The VSEL instruction can't use all the usual ARM condition
4281     // codes: it only has two bits to select the condition code, so it's
4282     // constrained to use only GE, GT, VS and EQ.
4283     //
4284     // To implement all the various ISD::SETXXX opcodes, we sometimes need to
4285     // swap the operands of the previous compare instruction (effectively
4286     // inverting the compare condition, swapping 'less' and 'greater') and
4287     // sometimes need to swap the operands to the VSEL (which inverts the
4288     // condition in the sense of firing whenever the previous condition didn't)
4289     if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
4290                                     TrueVal.getValueType() == MVT::f64)) {
4291       ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4292       if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
4293           CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
4294         CC = ISD::getSetCCInverse(CC, true);
4295         std::swap(TrueVal, FalseVal);
4296       }
4297     }
4298 
4299     SDValue ARMcc;
4300     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4301     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4302     return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4303   }
4304 
4305   ARMCC::CondCodes CondCode, CondCode2;
4306   bool InvalidOnQNaN;
4307   FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);
4308 
4309   // Try to generate VMAXNM/VMINNM on ARMv8.
4310   if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
4311                                   TrueVal.getValueType() == MVT::f64)) {
4312     bool swpCmpOps = false;
4313     bool swpVselOps = false;
4314     checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
4315 
4316     if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
4317         CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
4318       if (swpCmpOps)
4319         std::swap(LHS, RHS);
4320       if (swpVselOps)
4321         std::swap(TrueVal, FalseVal);
4322     }
4323   }
4324 
4325   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4326   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4327   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4328   SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4329   if (CondCode2 != ARMCC::AL) {
4330     SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
4331     // FIXME: Needs another CMP because flag can have but one use.
4332     SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4333     Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
4334   }
4335   return Result;
4336 }
4337 
4338 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
4339 /// to morph to an integer compare sequence.
4340 static bool canChangeToInt(SDValue Op, bool &SeenZero,
4341                            const ARMSubtarget *Subtarget) {
4342   SDNode *N = Op.getNode();
4343   if (!N->hasOneUse())
4344     // Otherwise it requires moving the value from fp to integer registers.
4345     return false;
4346   if (!N->getNumValues())
4347     return false;
4348   EVT VT = Op.getValueType();
4349   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
4350     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
4351     // vmrs are very slow, e.g. cortex-a8.
4352     return false;
4353 
4354   if (isFloatingPointZero(Op)) {
4355     SeenZero = true;
4356     return true;
4357   }
4358   return ISD::isNormalLoad(N);
4359 }
4360 
4361 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
4362   if (isFloatingPointZero(Op))
4363     return DAG.getConstant(0, SDLoc(Op), MVT::i32);
4364 
4365   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
4366     return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
4367                        Ld->getPointerInfo(), Ld->getAlignment(),
4368                        Ld->getMemOperand()->getFlags());
4369 
4370   llvm_unreachable("Unknown VFP cmp argument!");
4371 }
4372 
4373 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
4374                            SDValue &RetVal1, SDValue &RetVal2) {
4375   SDLoc dl(Op);
4376 
4377   if (isFloatingPointZero(Op)) {
4378     RetVal1 = DAG.getConstant(0, dl, MVT::i32);
4379     RetVal2 = DAG.getConstant(0, dl, MVT::i32);
4380     return;
4381   }
4382 
4383   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
4384     SDValue Ptr = Ld->getBasePtr();
4385     RetVal1 =
4386         DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
4387                     Ld->getAlignment(), Ld->getMemOperand()->getFlags());
4388 
4389     EVT PtrType = Ptr.getValueType();
4390     unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
4391     SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
4392                                  PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
4393     RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
4394                           Ld->getPointerInfo().getWithOffset(4), NewAlign,
4395                           Ld->getMemOperand()->getFlags());
4396     return;
4397   }
4398 
4399   llvm_unreachable("Unknown VFP cmp argument!");
4400 }
4401 
4402 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
4403 /// f32 and even f64 comparisons to integer ones.
4404 SDValue
4405 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
4406   SDValue Chain = Op.getOperand(0);
4407   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
4408   SDValue LHS = Op.getOperand(2);
4409   SDValue RHS = Op.getOperand(3);
4410   SDValue Dest = Op.getOperand(4);
4411   SDLoc dl(Op);
4412 
4413   bool LHSSeenZero = false;
4414   bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
4415   bool RHSSeenZero = false;
4416   bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
4417   if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
4418     // If unsafe fp math optimization is enabled and there are no other uses of
4419     // the CMP operands, and the condition code is EQ or NE, we can optimize it
4420     // to an integer comparison.
4421     if (CC == ISD::SETOEQ)
4422       CC = ISD::SETEQ;
4423     else if (CC == ISD::SETUNE)
4424       CC = ISD::SETNE;
4425 
4426     SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
4427     SDValue ARMcc;
4428     if (LHS.getValueType() == MVT::f32) {
4429       LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
4430                         bitcastf32Toi32(LHS, DAG), Mask);
4431       RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
4432                         bitcastf32Toi32(RHS, DAG), Mask);
4433       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4434       SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4435       return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
4436                          Chain, Dest, ARMcc, CCR, Cmp);
4437     }
4438 
4439     SDValue LHS1, LHS2;
4440     SDValue RHS1, RHS2;
4441     expandf64Toi32(LHS, DAG, LHS1, LHS2);
4442     expandf64Toi32(RHS, DAG, RHS1, RHS2);
4443     LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
4444     RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
4445     ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4446     ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4447     SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
4448     SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
4449     return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
4450   }
4451 
4452   return SDValue();
4453 }
4454 
4455 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
4456   SDValue Chain = Op.getOperand(0);
4457   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
4458   SDValue LHS = Op.getOperand(2);
4459   SDValue RHS = Op.getOperand(3);
4460   SDValue Dest = Op.getOperand(4);
4461   SDLoc dl(Op);
4462 
4463   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
4464     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
4465                                                     dl);
4466 
4467     // If softenSetCCOperands only returned one value, we should compare it to
4468     // zero.
4469     if (!RHS.getNode()) {
4470       RHS = DAG.getConstant(0, dl, LHS.getValueType());
4471       CC = ISD::SETNE;
4472     }
4473   }
4474 
4475   if (LHS.getValueType() == MVT::i32) {
4476     SDValue ARMcc;
4477     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4478     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4479     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
4480                        Chain, Dest, ARMcc, CCR, Cmp);
4481   }
4482 
4483   assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
4484 
4485   if (getTargetMachine().Options.UnsafeFPMath &&
4486       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
4487        CC == ISD::SETNE || CC == ISD::SETUNE)) {
4488     if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
4489       return Result;
4490   }
4491 
4492   ARMCC::CondCodes CondCode, CondCode2;
4493   bool InvalidOnQNaN;
4494   FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);
4495 
4496   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4497   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4498   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4499   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
4500   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
4501   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
4502   if (CondCode2 != ARMCC::AL) {
4503     ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
4504     SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
4505     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
4506   }
4507   return Res;
4508 }
4509 
4510 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
4511   SDValue Chain = Op.getOperand(0);
4512   SDValue Table = Op.getOperand(1);
4513   SDValue Index = Op.getOperand(2);
4514   SDLoc dl(Op);
4515 
4516   EVT PTy = getPointerTy(DAG.getDataLayout());
4517   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
4518   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
4519   Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
4520   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
4521   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
4522   if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
4523     // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump table
4524     // which does another jump to the destination. This also makes it easier
4525     // to translate it to TBB / TBH later (Thumb2 only).
4526     // FIXME: This might not work if the function is extremely large.
4527     return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
4528                        Addr, Op.getOperand(2), JTI);
4529   }
4530   if (isPositionIndependent() || Subtarget->isROPI()) {
4531     Addr =
4532         DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
4533                     MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
4534     Chain = Addr.getValue(1);
4535     Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
4536     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
4537   } else {
4538     Addr =
4539         DAG.getLoad(PTy, dl, Chain, Addr,
4540                     MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
4541     Chain = Addr.getValue(1);
4542     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
4543   }
4544 }
4545 
4546 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
4547   EVT VT = Op.getValueType();
4548   SDLoc dl(Op);
4549 
4550   if (Op.getValueType().getVectorElementType() == MVT::i32) {
4551     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
4552       return Op;
4553     return DAG.UnrollVectorOp(Op.getNode());
4554   }
4555 
4556   assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
4557          "Invalid type for custom lowering!");
4558   if (VT != MVT::v4i16)
4559     return DAG.UnrollVectorOp(Op.getNode());
4560 
4561   Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
4562   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
4563 }
4564 
4565 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
4566   EVT VT = Op.getValueType();
4567   if (VT.isVector())
4568     return LowerVectorFP_TO_INT(Op, DAG);
4569   if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) {
4570     RTLIB::Libcall LC;
4571     if (Op.getOpcode() == ISD::FP_TO_SINT)
4572       LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
4573                               Op.getValueType());
4574     else
4575       LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
4576                               Op.getValueType());
4577     return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
4578                        /*isSigned*/ false, SDLoc(Op)).first;
4579   }
4580 
4581   return Op;
4582 }
4583 
4584 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
4585   EVT VT = Op.getValueType();
4586   SDLoc dl(Op);
4587 
4588   if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
4589     if (VT.getVectorElementType() == MVT::f32)
4590       return Op;
4591     return DAG.UnrollVectorOp(Op.getNode());
4592   }
4593 
4594   assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
4595          "Invalid type for custom lowering!");
4596   if (VT != MVT::v4f32)
4597     return DAG.UnrollVectorOp(Op.getNode());
4598 
4599   unsigned CastOpc;
4600   unsigned Opc;
4601   switch (Op.getOpcode()) {
4602   default: llvm_unreachable("Invalid opcode!");
4603   case ISD::SINT_TO_FP:
4604     CastOpc = ISD::SIGN_EXTEND;
4605     Opc = ISD::SINT_TO_FP;
4606     break;
4607   case ISD::UINT_TO_FP:
4608     CastOpc = ISD::ZERO_EXTEND;
4609     Opc = ISD::UINT_TO_FP;
4610     break;
4611   }
4612 
4613   Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
4614   return DAG.getNode(Opc, dl, VT, Op);
4615 }
4616 
4617 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
4618   EVT VT = Op.getValueType();
4619   if (VT.isVector())
4620     return LowerVectorINT_TO_FP(Op, DAG);
4621   if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) {
4622     RTLIB::Libcall LC;
4623     if (Op.getOpcode() == ISD::SINT_TO_FP)
4624       LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
4625                               Op.getValueType());
4626     else
4627       LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
4628                               Op.getValueType());
4629     return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
4630                        /*isSigned*/ false, SDLoc(Op)).first;
4631   }
4632 
4633   return Op;
4634 }
4635 
4636 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
4637   // Implement fcopysign with a fabs and a conditional fneg.
4638   SDValue Tmp0 = Op.getOperand(0);
4639   SDValue Tmp1 = Op.getOperand(1);
4640   SDLoc dl(Op);
4641   EVT VT = Op.getValueType();
4642   EVT SrcVT = Tmp1.getValueType();
4643   bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
4644     Tmp0.getOpcode() == ARMISD::VMOVDRR;
4645   bool UseNEON = !InGPR && Subtarget->hasNEON();
4646 
4647   if (UseNEON) {
4648     // Use VBSL to copy the sign bit.
4649     unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
4650     SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
4651                                DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
4652     EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
4653     if (VT == MVT::f64)
4654       Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
4655                          DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
4656                          DAG.getConstant(32, dl, MVT::i32));
4657     else /*if (VT == MVT::f32)*/
4658       Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
4659     if (SrcVT == MVT::f32) {
4660       Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
4661       if (VT == MVT::f64)
4662         Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
4663                            DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
4664                            DAG.getConstant(32, dl, MVT::i32));
4665     } else if (VT == MVT::f32)
4666       Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
4667                          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
4668                          DAG.getConstant(32, dl, MVT::i32));
4669     Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
4670     Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
4671 
4672     SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
4673                                             dl, MVT::i32);
4674     AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
4675     SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
4676                                   DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
4677 
4678     SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
4679                               DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
4680                               DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
4681     if (VT == MVT::f32) {
4682       Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
4683       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
4684                         DAG.getConstant(0, dl, MVT::i32));
4685     } else {
4686       Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
4687     }
4688 
4689     return Res;
4690   }
4691 
4692   // Bitcast operand 1 to i32.
4693   if (SrcVT == MVT::f64)
4694     Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
4695                        Tmp1).getValue(1);
4696   Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
4697 
4698   // Or in the signbit with integer operations.
4699   SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
4700   SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
4701   Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
4702   if (VT == MVT::f32) {
4703     Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
4704                        DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
4705     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4706                        DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
4707   }
4708 
4709   // f64: Or the high part with signbit and then combine two parts.
4710   Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
4711                      Tmp0);
4712   SDValue Lo = Tmp0.getValue(0);
4713   SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
4714   Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
4715   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
4716 }
4717 
4718 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
4719   MachineFunction &MF = DAG.getMachineFunction();
4720   MachineFrameInfo &MFI = MF.getFrameInfo();
4721   MFI.setReturnAddressIsTaken(true);
4722 
4723   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
4724     return SDValue();
4725 
4726   EVT VT = Op.getValueType();
4727   SDLoc dl(Op);
4728   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4729   if (Depth) {
4730     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
4731     SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
4732     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
4733                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
4734                        MachinePointerInfo());
4735   }
4736 
4737   // Return LR, which contains the return address. Mark it an implicit live-in.
4738   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
4739   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
4740 }
4741 
4742 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
4743   const ARMBaseRegisterInfo &ARI =
4744     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
4745   MachineFunction &MF = DAG.getMachineFunction();
4746   MachineFrameInfo &MFI = MF.getFrameInfo();
4747   MFI.setFrameAddressIsTaken(true);
4748 
4749   EVT VT = Op.getValueType();
4750   SDLoc dl(Op);  // FIXME probably not meaningful
4751   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4752   unsigned FrameReg = ARI.getFrameRegister(MF);
4753   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
4754   while (Depth--)
4755     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
4756                             MachinePointerInfo());
4757   return FrameAddr;
4758 }
4759 
4760 // FIXME? Maybe this could be a TableGen attribute on some registers and
4761 // this table could be generated automatically from RegInfo.
4762 unsigned ARMTargetLowering::getRegisterByName(const char* RegName, EVT VT,
4763                                               SelectionDAG &DAG) const {
4764   unsigned Reg = StringSwitch<unsigned>(RegName)
4765                        .Case("sp", ARM::SP)
4766                        .Default(0);
4767   if (Reg)
4768     return Reg;
4769   report_fatal_error(Twine("Invalid register name \""
4770                               + StringRef(RegName)  + "\"."));
4771 }
4772 
4773 // Result is 64 bit value so split into two 32 bit values and return as a
4774 // pair of values.
4775 static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
4776                                 SelectionDAG &DAG) {
4777   SDLoc DL(N);
4778 
4779   // This function is only supposed to be called for i64 type destination.
4780   assert(N->getValueType(0) == MVT::i64
4781           && "ExpandREAD_REGISTER called for non-i64 type result.");
4782 
4783   SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
4784                              DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
4785                              N->getOperand(0),
4786                              N->getOperand(1));
4787 
4788   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
4789                     Read.getValue(1)));
4790   Results.push_back(Read.getOperand(0));
4791 }
4792 
4793 /// \p BC is a bitcast that is about to be turned into a VMOVDRR.
4794 /// When \p DstVT, the destination type of \p BC, is on the vector
4795 /// register bank and the source of bitcast, \p Op, operates on the same bank,
4796 /// it might be possible to combine them, such that everything stays on the
4797 /// vector register bank.
4798 /// \p return The node that would replace \p BT, if the combine
4799 /// is possible.
4800 static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
4801                                                 SelectionDAG &DAG) {
4802   SDValue Op = BC->getOperand(0);
4803   EVT DstVT = BC->getValueType(0);
4804 
4805   // The only vector instruction that can produce a scalar (remember,
4806   // since the bitcast was about to be turned into VMOVDRR, the source
4807   // type is i64) from a vector is EXTRACT_VECTOR_ELT.
4808   // Moreover, we can do this combine only if there is one use.
4809   // Finally, if the destination type is not a vector, there is not
4810   // much point on forcing everything on the vector bank.
4811   if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4812       !Op.hasOneUse())
4813     return SDValue();
4814 
4815   // If the index is not constant, we will introduce an additional
4816   // multiply that will stick.
4817   // Give up in that case.
4818   ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
4819   if (!Index)
4820     return SDValue();
4821   unsigned DstNumElt = DstVT.getVectorNumElements();
4822 
4823   // Compute the new index.
4824   const APInt &APIntIndex = Index->getAPIntValue();
4825   APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
4826   NewIndex *= APIntIndex;
4827   // Check if the new constant index fits into i32.
4828   if (NewIndex.getBitWidth() > 32)
4829     return SDValue();
4830 
4831   // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
4832   // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
4833   SDLoc dl(Op);
4834   SDValue ExtractSrc = Op.getOperand(0);
4835   EVT VecVT = EVT::getVectorVT(
4836       *DAG.getContext(), DstVT.getScalarType(),
4837       ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
4838   SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
4839   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
4840                      DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
4841 }
4842 
4843 /// ExpandBITCAST - If the target supports VFP, this function is called to
4844 /// expand a bit convert where either the source or destination type is i64 to
4845 /// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
4846 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
4847 /// vectors), since the legalizer won't know what to do with that.
4848 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
4849   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4850   SDLoc dl(N);
4851   SDValue Op = N->getOperand(0);
4852 
4853   // This function is only supposed to be called for i64 types, either as the
4854   // source or destination of the bit convert.
4855   EVT SrcVT = Op.getValueType();
4856   EVT DstVT = N->getValueType(0);
4857   assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
4858          "ExpandBITCAST called for non-i64 type");
4859 
4860   // Turn i64->f64 into VMOVDRR.
4861   if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
4862     // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
4863     // if we can combine the bitcast with its source.
4864     if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
4865       return Val;
4866 
4867     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
4868                              DAG.getConstant(0, dl, MVT::i32));
4869     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
4870                              DAG.getConstant(1, dl, MVT::i32));
4871     return DAG.getNode(ISD::BITCAST, dl, DstVT,
4872                        DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
4873   }
4874 
4875   // Turn f64->i64 into VMOVRRD.
4876   if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
4877     SDValue Cvt;
4878     if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
4879         SrcVT.getVectorNumElements() > 1)
4880       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
4881                         DAG.getVTList(MVT::i32, MVT::i32),
4882                         DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
4883     else
4884       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
4885                         DAG.getVTList(MVT::i32, MVT::i32), Op);
4886     // Merge the pieces into a single i64 value.
4887     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
4888   }
4889 
4890   return SDValue();
4891 }
4892 
4893 /// getZeroVector - Returns a vector of specified type with all zero elements.
4894 /// Zero vectors are used to represent vector negation and in those cases
4895 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
4896 /// not support i64 elements, so sometimes the zero vectors will need to be
4897 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
4898 /// zero vector.
4899 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
4900   assert(VT.isVector() && "Expected a vector type");
4901   // The canonical modified immediate encoding of a zero vector is....0!
4902   SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
4903   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
4904   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
4905   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
4906 }
4907 
4908 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two
4909 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
4910 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
4911                                                 SelectionDAG &DAG) const {
4912   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
4913   EVT VT = Op.getValueType();
4914   unsigned VTBits = VT.getSizeInBits();
4915   SDLoc dl(Op);
4916   SDValue ShOpLo = Op.getOperand(0);
4917   SDValue ShOpHi = Op.getOperand(1);
4918   SDValue ShAmt  = Op.getOperand(2);
4919   SDValue ARMcc;
4920   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4921   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
4922 
4923   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
4924 
4925   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
4926                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
4927   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
4928   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
4929                                    DAG.getConstant(VTBits, dl, MVT::i32));
4930   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
4931   SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4932   SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
4933   SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4934                             ISD::SETGE, ARMcc, DAG, dl);
4935   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
4936                            ARMcc, CCR, CmpLo);
4937 
4938 
4939   SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
4940   SDValue HiBigShift = Opc == ISD::SRA
4941                            ? DAG.getNode(Opc, dl, VT, ShOpHi,
4942                                          DAG.getConstant(VTBits - 1, dl, VT))
4943                            : DAG.getConstant(0, dl, VT);
4944   SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4945                             ISD::SETGE, ARMcc, DAG, dl);
4946   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
4947                            ARMcc, CCR, CmpHi);
4948 
4949   SDValue Ops[2] = { Lo, Hi };
4950   return DAG.getMergeValues(Ops, dl);
4951 }
4952 
4953 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
4954 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
4955 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
4956                                                SelectionDAG &DAG) const {
4957   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
4958   EVT VT = Op.getValueType();
4959   unsigned VTBits = VT.getSizeInBits();
4960   SDLoc dl(Op);
4961   SDValue ShOpLo = Op.getOperand(0);
4962   SDValue ShOpHi = Op.getOperand(1);
4963   SDValue ShAmt  = Op.getOperand(2);
4964   SDValue ARMcc;
4965   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4966 
4967   assert(Op.getOpcode() == ISD::SHL_PARTS);
4968   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
4969                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
4970   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
4971   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
4972   SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4973 
4974   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
4975                                    DAG.getConstant(VTBits, dl, MVT::i32));
4976   SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
4977   SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4978                             ISD::SETGE, ARMcc, DAG, dl);
4979   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
4980                            ARMcc, CCR, CmpHi);
4981 
4982   SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4983                           ISD::SETGE, ARMcc, DAG, dl);
4984   SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
4985   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
4986                            DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);
4987 
4988   SDValue Ops[2] = { Lo, Hi };
4989   return DAG.getMergeValues(Ops, dl);
4990 }
4991 
4992 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
4993                                             SelectionDAG &DAG) const {
4994   // The rounding mode is in bits 23:22 of the FPSCR.
4995   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
4996   // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
4997   // so that the shift + and get folded into a bitfield extract.
4998   SDLoc dl(Op);
4999   SDValue Ops[] = { DAG.getEntryNode(),
5000                     DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32) };
5001 
5002   SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, MVT::i32, Ops);
5003   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
5004                                   DAG.getConstant(1U << 22, dl, MVT::i32));
5005   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
5006                               DAG.getConstant(22, dl, MVT::i32));
5007   return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
5008                      DAG.getConstant(3, dl, MVT::i32));
5009 }
5010 
5011 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
5012                          const ARMSubtarget *ST) {
5013   SDLoc dl(N);
5014   EVT VT = N->getValueType(0);
5015   if (VT.isVector()) {
5016     assert(ST->hasNEON());
5017 
5018     // Compute the least significant set bit: LSB = X & -X
5019     SDValue X = N->getOperand(0);
5020     SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
5021     SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);
5022 
5023     EVT ElemTy = VT.getVectorElementType();
5024 
5025     if (ElemTy == MVT::i8) {
5026       // Compute with: cttz(x) = ctpop(lsb - 1)
5027       SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5028                                 DAG.getTargetConstant(1, dl, ElemTy));
5029       SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
5030       return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
5031     }
5032 
5033     if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
5034         (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
5035       // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
5036       unsigned NumBits = ElemTy.getSizeInBits();
5037       SDValue WidthMinus1 =
5038           DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5039                       DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
5040       SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
5041       return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
5042     }
5043 
5044     // Compute with: cttz(x) = ctpop(lsb - 1)
5045 
5046     // Since we can only compute the number of bits in a byte with vcnt.8, we
5047     // have to gather the result with pairwise addition (vpaddl) for i16, i32,
5048     // and i64.
5049 
5050     // Compute LSB - 1.
5051     SDValue Bits;
5052     if (ElemTy == MVT::i64) {
5053       // Load constant 0xffff'ffff'ffff'ffff to register.
5054       SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5055                                DAG.getTargetConstant(0x1eff, dl, MVT::i32));
5056       Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
5057     } else {
5058       SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5059                                 DAG.getTargetConstant(1, dl, ElemTy));
5060       Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
5061     }
5062 
5063     // Count #bits with vcnt.8.
5064     EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
5065     SDValue BitsVT8 = DAG.getNode(ISD::BITCAST, dl, VT8Bit, Bits);
5066     SDValue Cnt8 = DAG.getNode(ISD::CTPOP, dl, VT8Bit, BitsVT8);
5067 
5068     // Gather the #bits with vpaddl (pairwise add.)
5069     EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
5070     SDValue Cnt16 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT16Bit,
5071         DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5072         Cnt8);
5073     if (ElemTy == MVT::i16)
5074       return Cnt16;
5075 
5076     EVT VT32Bit = VT.is64BitVector() ? MVT::v2i32 : MVT::v4i32;
5077     SDValue Cnt32 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT32Bit,
5078         DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5079         Cnt16);
5080     if (ElemTy == MVT::i32)
5081       return Cnt32;
5082 
5083     assert(ElemTy == MVT::i64);
5084     SDValue Cnt64 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5085         DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5086         Cnt32);
5087     return Cnt64;
5088   }
5089 
5090   if (!ST->hasV6T2Ops())
5091     return SDValue();
5092 
5093   SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
5094   return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
5095 }
5096 
5097 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
5098 /// for each 16-bit element from operand, repeated.  The basic idea is to
5099 /// leverage vcnt to get the 8-bit counts, gather and add the results.
5100 ///
5101 /// Trace for v4i16:
5102 /// input    = [v0    v1    v2    v3   ] (vi 16-bit element)
5103 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
5104 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
5105 /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6]
5106 ///            [b0 b1 b2 b3 b4 b5 b6 b7]
5107 ///           +[b1 b0 b3 b2 b5 b4 b7 b6]
5108 /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0,
5109 /// vuzp:    = [k0 k1 k2 k3 k0 k1 k2 k3]  each ki is 8-bits)
5110 static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
5111   EVT VT = N->getValueType(0);
5112   SDLoc DL(N);
5113 
5114   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
5115   SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
5116   SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0);
5117   SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1);
5118   SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2);
5119   return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3);
5120 }
5121 
5122 /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the
5123 /// bit-count for each 16-bit element from the operand.  We need slightly
5124 /// different sequencing for v4i16 and v8i16 to stay within NEON's available
5125 /// 64/128-bit registers.
5126 ///
5127 /// Trace for v4i16:
5128 /// input           = [v0    v1    v2    v3    ] (vi 16-bit element)
5129 /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi)
5130 /// v8i16:Extended  = [k0    k1    k2    k3    k0    k1    k2    k3    ]
5131 /// v4i16:Extracted = [k0    k1    k2    k3    ]
5132 static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
5133   EVT VT = N->getValueType(0);
5134   SDLoc DL(N);
5135 
5136   SDValue BitCounts = getCTPOP16BitCounts(N, DAG);
5137   if (VT.is64BitVector()) {
5138     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts);
5139     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended,
5140                        DAG.getIntPtrConstant(0, DL));
5141   } else {
5142     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8,
5143                                     BitCounts, DAG.getIntPtrConstant(0, DL));
5144     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted);
5145   }
5146 }
5147 
5148 /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the
5149 /// bit-count for each 32-bit element from the operand.  The idea here is
5150 /// to split the vector into 16-bit elements, leverage the 16-bit count
5151 /// routine, and then combine the results.
5152 ///
5153 /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged):
5154 /// input    = [v0    v1    ] (vi: 32-bit elements)
5155 /// Bitcast  = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1])
5156 /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi)
5157 /// vrev: N0 = [k1 k0 k3 k2 ]
5158 ///            [k0 k1 k2 k3 ]
5159 ///       N1 =+[k1 k0 k3 k2 ]
5160 ///            [k0 k2 k1 k3 ]
5161 ///       N2 =+[k1 k3 k0 k2 ]
5162 ///            [k0    k2    k1    k3    ]
5163 /// Extended =+[k1    k3    k0    k2    ]
5164 ///            [k0    k2    ]
5165 /// Extracted=+[k1    k3    ]
5166 ///
5167 static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
5168   EVT VT = N->getValueType(0);
5169   SDLoc DL(N);
5170 
5171   EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
5172 
5173   SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0));
5174   SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG);
5175   SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16);
5176   SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0);
5177   SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1);
5178 
5179   if (VT.is64BitVector()) {
5180     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2);
5181     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended,
5182                        DAG.getIntPtrConstant(0, DL));
5183   } else {
5184     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2,
5185                                     DAG.getIntPtrConstant(0, DL));
5186     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted);
5187   }
5188 }
5189 
5190 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
5191                           const ARMSubtarget *ST) {
5192   EVT VT = N->getValueType(0);
5193 
5194   assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
5195   assert((VT == MVT::v2i32 || VT == MVT::v4i32 ||
5196           VT == MVT::v4i16 || VT == MVT::v8i16) &&
5197          "Unexpected type for custom ctpop lowering");
5198 
5199   if (VT.getVectorElementType() == MVT::i32)
5200     return lowerCTPOP32BitElements(N, DAG);
5201   else
5202     return lowerCTPOP16BitElements(N, DAG);
5203 }
5204 
5205 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
5206                           const ARMSubtarget *ST) {
5207   EVT VT = N->getValueType(0);
5208   SDLoc dl(N);
5209 
5210   if (!VT.isVector())
5211     return SDValue();
5212 
5213   // Lower vector shifts on NEON to use VSHL.
5214   assert(ST->hasNEON() && "unexpected vector shift");
5215 
5216   // Left shifts translate directly to the vshiftu intrinsic.
5217   if (N->getOpcode() == ISD::SHL)
5218     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5219                        DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl,
5220                                        MVT::i32),
5221                        N->getOperand(0), N->getOperand(1));
5222 
5223   assert((N->getOpcode() == ISD::SRA ||
5224           N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
5225 
5226   // NEON uses the same intrinsics for both left and right shifts.  For
5227   // right shifts, the shift amounts are negative, so negate the vector of
5228   // shift amounts.
5229   EVT ShiftVT = N->getOperand(1).getValueType();
5230   SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
5231                                      getZeroVector(ShiftVT, DAG, dl),
5232                                      N->getOperand(1));
5233   Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
5234                              Intrinsic::arm_neon_vshifts :
5235                              Intrinsic::arm_neon_vshiftu);
5236   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5237                      DAG.getConstant(vshiftInt, dl, MVT::i32),
5238                      N->getOperand(0), NegatedCount);
5239 }
5240 
5241 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
5242                                 const ARMSubtarget *ST) {
5243   EVT VT = N->getValueType(0);
5244   SDLoc dl(N);
5245 
5246   // We can get here for a node like i32 = ISD::SHL i32, i64
5247   if (VT != MVT::i64)
5248     return SDValue();
5249 
5250   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
5251          "Unknown shift to lower!");
5252 
5253   // We only lower SRA, SRL of 1 here, all others use generic lowering.
5254   if (!isOneConstant(N->getOperand(1)))
5255     return SDValue();
5256 
5257   // If we are in thumb mode, we don't have RRX.
5258   if (ST->isThumb1Only()) return SDValue();
5259 
5260   // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
5261   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
5262                            DAG.getConstant(0, dl, MVT::i32));
5263   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
5264                            DAG.getConstant(1, dl, MVT::i32));
5265 
5266   // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
5267   // captures the result into a carry flag.
5268   unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
5269   Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
5270 
5271   // The low part is an ARMISD::RRX operand, which shifts the carry in.
5272   Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
5273 
5274   // Merge the pieces into a single i64 value.
5275  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5276 }
5277 
5278 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
5279   SDValue TmpOp0, TmpOp1;
5280   bool Invert = false;
5281   bool Swap = false;
5282   unsigned Opc = 0;
5283 
5284   SDValue Op0 = Op.getOperand(0);
5285   SDValue Op1 = Op.getOperand(1);
5286   SDValue CC = Op.getOperand(2);
5287   EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
5288   EVT VT = Op.getValueType();
5289   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
5290   SDLoc dl(Op);
5291 
5292   if (Op0.getValueType().getVectorElementType() == MVT::i64 &&
5293       (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) {
5294     // Special-case integer 64-bit equality comparisons. They aren't legal,
5295     // but they can be lowered with a few vector instructions.
5296     unsigned CmpElements = CmpVT.getVectorNumElements() * 2;
5297     EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements);
5298     SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0);
5299     SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1);
5300     SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1,
5301                               DAG.getCondCode(ISD::SETEQ));
5302     SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp);
5303     SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed);
5304     Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged);
5305     if (SetCCOpcode == ISD::SETNE)
5306       Merged = DAG.getNOT(dl, Merged, CmpVT);
5307     Merged = DAG.getSExtOrTrunc(Merged, dl, VT);
5308     return Merged;
5309   }
5310 
5311   if (CmpVT.getVectorElementType() == MVT::i64)
5312     // 64-bit comparisons are not legal in general.
5313     return SDValue();
5314 
5315   if (Op1.getValueType().isFloatingPoint()) {
5316     switch (SetCCOpcode) {
5317     default: llvm_unreachable("Illegal FP comparison");
5318     case ISD::SETUNE:
5319     case ISD::SETNE:  Invert = true; LLVM_FALLTHROUGH;
5320     case ISD::SETOEQ:
5321     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
5322     case ISD::SETOLT:
5323     case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
5324     case ISD::SETOGT:
5325     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
5326     case ISD::SETOLE:
5327     case ISD::SETLE:  Swap = true; LLVM_FALLTHROUGH;
5328     case ISD::SETOGE:
5329     case ISD::SETGE: Opc = ARMISD::VCGE; break;
5330     case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
5331     case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
5332     case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
5333     case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
5334     case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
5335     case ISD::SETONE:
5336       // Expand this to (OLT | OGT).
5337       TmpOp0 = Op0;
5338       TmpOp1 = Op1;
5339       Opc = ISD::OR;
5340       Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
5341       Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
5342       break;
5343     case ISD::SETUO:
5344       Invert = true;
5345       LLVM_FALLTHROUGH;
5346     case ISD::SETO:
5347       // Expand this to (OLT | OGE).
5348       TmpOp0 = Op0;
5349       TmpOp1 = Op1;
5350       Opc = ISD::OR;
5351       Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
5352       Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1);
5353       break;
5354     }
5355   } else {
5356     // Integer comparisons.
5357     switch (SetCCOpcode) {
5358     default: llvm_unreachable("Illegal integer comparison");
5359     case ISD::SETNE:  Invert = true;
5360     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
5361     case ISD::SETLT:  Swap = true;
5362     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
5363     case ISD::SETLE:  Swap = true;
5364     case ISD::SETGE:  Opc = ARMISD::VCGE; break;
5365     case ISD::SETULT: Swap = true;
5366     case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
5367     case ISD::SETULE: Swap = true;
5368     case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
5369     }
5370 
5371     // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
5372     if (Opc == ARMISD::VCEQ) {
5373 
5374       SDValue AndOp;
5375       if (ISD::isBuildVectorAllZeros(Op1.getNode()))
5376         AndOp = Op0;
5377       else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
5378         AndOp = Op1;
5379 
5380       // Ignore bitconvert.
5381       if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
5382         AndOp = AndOp.getOperand(0);
5383 
5384       if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
5385         Opc = ARMISD::VTST;
5386         Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
5387         Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
5388         Invert = !Invert;
5389       }
5390     }
5391   }
5392 
5393   if (Swap)
5394     std::swap(Op0, Op1);
5395 
5396   // If one of the operands is a constant vector zero, attempt to fold the
5397   // comparison to a specialized compare-against-zero form.
5398   SDValue SingleOp;
5399   if (ISD::isBuildVectorAllZeros(Op1.getNode()))
5400     SingleOp = Op0;
5401   else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
5402     if (Opc == ARMISD::VCGE)
5403       Opc = ARMISD::VCLEZ;
5404     else if (Opc == ARMISD::VCGT)
5405       Opc = ARMISD::VCLTZ;
5406     SingleOp = Op1;
5407   }
5408 
5409   SDValue Result;
5410   if (SingleOp.getNode()) {
5411     switch (Opc) {
5412     case ARMISD::VCEQ:
5413       Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break;
5414     case ARMISD::VCGE:
5415       Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break;
5416     case ARMISD::VCLEZ:
5417       Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break;
5418     case ARMISD::VCGT:
5419       Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break;
5420     case ARMISD::VCLTZ:
5421       Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break;
5422     default:
5423       Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
5424     }
5425   } else {
5426      Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
5427   }
5428 
5429   Result = DAG.getSExtOrTrunc(Result, dl, VT);
5430 
5431   if (Invert)
5432     Result = DAG.getNOT(dl, Result, VT);
5433 
5434   return Result;
5435 }
5436 
5437 static SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) {
5438   SDValue LHS = Op.getOperand(0);
5439   SDValue RHS = Op.getOperand(1);
5440   SDValue Carry = Op.getOperand(2);
5441   SDValue Cond = Op.getOperand(3);
5442   SDLoc DL(Op);
5443 
5444   assert(LHS.getSimpleValueType().isInteger() && "SETCCE is integer only.");
5445 
5446   assert(Carry.getOpcode() != ISD::CARRY_FALSE);
5447   SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
5448   SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);
5449 
5450   SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
5451   SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
5452   SDValue ARMcc = DAG.getConstant(
5453       IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
5454   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5455   SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
5456                                    Cmp.getValue(1), SDValue());
5457   return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
5458                      CCR, Chain.getValue(1));
5459 }
5460 
5461 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
5462 /// valid vector constant for a NEON instruction with a "modified immediate"
5463 /// operand (e.g., VMOV).  If so, return the encoded value.
5464 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
5465                                  unsigned SplatBitSize, SelectionDAG &DAG,
5466                                  const SDLoc &dl, EVT &VT, bool is128Bits,
5467                                  NEONModImmType type) {
5468   unsigned OpCmode, Imm;
5469 
5470   // SplatBitSize is set to the smallest size that splats the vector, so a
5471   // zero vector will always have SplatBitSize == 8.  However, NEON modified
5472   // immediate instructions others than VMOV do not support the 8-bit encoding
5473   // of a zero vector, and the default encoding of zero is supposed to be the
5474   // 32-bit version.
5475   if (SplatBits == 0)
5476     SplatBitSize = 32;
5477 
5478   switch (SplatBitSize) {
5479   case 8:
5480     if (type != VMOVModImm)
5481       return SDValue();
5482     // Any 1-byte value is OK.  Op=0, Cmode=1110.
5483     assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
5484     OpCmode = 0xe;
5485     Imm = SplatBits;
5486     VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
5487     break;
5488 
5489   case 16:
5490     // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
5491     VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
5492     if ((SplatBits & ~0xff) == 0) {
5493       // Value = 0x00nn: Op=x, Cmode=100x.
5494       OpCmode = 0x8;
5495       Imm = SplatBits;
5496       break;
5497     }
5498     if ((SplatBits & ~0xff00) == 0) {
5499       // Value = 0xnn00: Op=x, Cmode=101x.
5500       OpCmode = 0xa;
5501       Imm = SplatBits >> 8;
5502       break;
5503     }
5504     return SDValue();
5505 
5506   case 32:
5507     // NEON's 32-bit VMOV supports splat values where:
5508     // * only one byte is nonzero, or
5509     // * the least significant byte is 0xff and the second byte is nonzero, or
5510     // * the least significant 2 bytes are 0xff and the third is nonzero.
5511     VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
5512     if ((SplatBits & ~0xff) == 0) {
5513       // Value = 0x000000nn: Op=x, Cmode=000x.
5514       OpCmode = 0;
5515       Imm = SplatBits;
5516       break;
5517     }
5518     if ((SplatBits & ~0xff00) == 0) {
5519       // Value = 0x0000nn00: Op=x, Cmode=001x.
5520       OpCmode = 0x2;
5521       Imm = SplatBits >> 8;
5522       break;
5523     }
5524     if ((SplatBits & ~0xff0000) == 0) {
5525       // Value = 0x00nn0000: Op=x, Cmode=010x.
5526       OpCmode = 0x4;
5527       Imm = SplatBits >> 16;
5528       break;
5529     }
5530     if ((SplatBits & ~0xff000000) == 0) {
5531       // Value = 0xnn000000: Op=x, Cmode=011x.
5532       OpCmode = 0x6;
5533       Imm = SplatBits >> 24;
5534       break;
5535     }
5536 
5537     // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
5538     if (type == OtherModImm) return SDValue();
5539 
5540     if ((SplatBits & ~0xffff) == 0 &&
5541         ((SplatBits | SplatUndef) & 0xff) == 0xff) {
5542       // Value = 0x0000nnff: Op=x, Cmode=1100.
5543       OpCmode = 0xc;
5544       Imm = SplatBits >> 8;
5545       break;
5546     }
5547 
5548     if ((SplatBits & ~0xffffff) == 0 &&
5549         ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
5550       // Value = 0x00nnffff: Op=x, Cmode=1101.
5551       OpCmode = 0xd;
5552       Imm = SplatBits >> 16;
5553       break;
5554     }
5555 
5556     // Note: there are a few 32-bit splat values (specifically: 00ffff00,
5557     // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
5558     // VMOV.I32.  A (very) minor optimization would be to replicate the value
5559     // and fall through here to test for a valid 64-bit splat.  But, then the
5560     // caller would also need to check and handle the change in size.
5561     return SDValue();
5562 
5563   case 64: {
5564     if (type != VMOVModImm)
5565       return SDValue();
5566     // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
5567     uint64_t BitMask = 0xff;
5568     uint64_t Val = 0;
5569     unsigned ImmMask = 1;
5570     Imm = 0;
5571     for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
5572       if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
5573         Val |= BitMask;
5574         Imm |= ImmMask;
5575       } else if ((SplatBits & BitMask) != 0) {
5576         return SDValue();
5577       }
5578       BitMask <<= 8;
5579       ImmMask <<= 1;
5580     }
5581 
5582     if (DAG.getDataLayout().isBigEndian())
5583       // swap higher and lower 32 bit word
5584       Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
5585 
5586     // Op=1, Cmode=1110.
5587     OpCmode = 0x1e;
5588     VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
5589     break;
5590   }
5591 
5592   default:
5593     llvm_unreachable("unexpected size for isNEONModifiedImm");
5594   }
5595 
5596   unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
5597   return DAG.getTargetConstant(EncodedVal, dl, MVT::i32);
5598 }
5599 
5600 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
5601                                            const ARMSubtarget *ST) const {
5602   bool IsDouble = Op.getValueType() == MVT::f64;
5603   ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
5604   const APFloat &FPVal = CFP->getValueAPF();
5605 
5606   // Prevent floating-point constants from using literal loads
5607   // when execute-only is enabled.
5608   if (ST->genExecuteOnly()) {
5609     APInt INTVal = FPVal.bitcastToAPInt();
5610     SDLoc DL(CFP);
5611     if (IsDouble) {
5612       SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32);
5613       SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32);
5614       if (!ST->isLittle())
5615         std::swap(Lo, Hi);
5616       return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi);
5617     } else {
5618       return DAG.getConstant(INTVal, DL, MVT::i32);
5619     }
5620   }
5621 
5622   if (!ST->hasVFP3())
5623     return SDValue();
5624 
5625   // Use the default (constant pool) lowering for double constants when we have
5626   // an SP-only FPU
5627   if (IsDouble && Subtarget->isFPOnlySP())
5628     return SDValue();
5629 
5630   // Try splatting with a VMOV.f32...
5631   int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
5632 
5633   if (ImmVal != -1) {
5634     if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
5635       // We have code in place to select a valid ConstantFP already, no need to
5636       // do any mangling.
5637       return Op;
5638     }
5639 
5640     // It's a float and we are trying to use NEON operations where
5641     // possible. Lower it to a splat followed by an extract.
5642     SDLoc DL(Op);
5643     SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32);
5644     SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
5645                                       NewVal);
5646     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
5647                        DAG.getConstant(0, DL, MVT::i32));
5648   }
5649 
5650   // The rest of our options are NEON only, make sure that's allowed before
5651   // proceeding..
5652   if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
5653     return SDValue();
5654 
5655   EVT VMovVT;
5656   uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
5657 
5658   // It wouldn't really be worth bothering for doubles except for one very
5659   // important value, which does happen to match: 0.0. So make sure we don't do
5660   // anything stupid.
5661   if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
5662     return SDValue();
5663 
5664   // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
5665   SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op),
5666                                      VMovVT, false, VMOVModImm);
5667   if (NewVal != SDValue()) {
5668     SDLoc DL(Op);
5669     SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
5670                                       NewVal);
5671     if (IsDouble)
5672       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
5673 
5674     // It's a float: cast and extract a vector element.
5675     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
5676                                        VecConstant);
5677     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
5678                        DAG.getConstant(0, DL, MVT::i32));
5679   }
5680 
5681   // Finally, try a VMVN.i32
5682   NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
5683                              false, VMVNModImm);
5684   if (NewVal != SDValue()) {
5685     SDLoc DL(Op);
5686     SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
5687 
5688     if (IsDouble)
5689       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
5690 
5691     // It's a float: cast and extract a vector element.
5692     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
5693                                        VecConstant);
5694     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
5695                        DAG.getConstant(0, DL, MVT::i32));
5696   }
5697 
5698   return SDValue();
5699 }
5700 
5701 // check if an VEXT instruction can handle the shuffle mask when the
5702 // vector sources of the shuffle are the same.
5703 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
5704   unsigned NumElts = VT.getVectorNumElements();
5705 
5706   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
5707   if (M[0] < 0)
5708     return false;
5709 
5710   Imm = M[0];
5711 
5712   // If this is a VEXT shuffle, the immediate value is the index of the first
5713   // element.  The other shuffle indices must be the successive elements after
5714   // the first one.
5715   unsigned ExpectedElt = Imm;
5716   for (unsigned i = 1; i < NumElts; ++i) {
5717     // Increment the expected index.  If it wraps around, just follow it
5718     // back to index zero and keep going.
5719     ++ExpectedElt;
5720     if (ExpectedElt == NumElts)
5721       ExpectedElt = 0;
5722 
5723     if (M[i] < 0) continue; // ignore UNDEF indices
5724     if (ExpectedElt != static_cast<unsigned>(M[i]))
5725       return false;
5726   }
5727 
5728   return true;
5729 }
5730 
5731 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
5732                        bool &ReverseVEXT, unsigned &Imm) {
5733   unsigned NumElts = VT.getVectorNumElements();
5734   ReverseVEXT = false;
5735 
5736   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
5737   if (M[0] < 0)
5738     return false;
5739 
5740   Imm = M[0];
5741 
5742   // If this is a VEXT shuffle, the immediate value is the index of the first
5743   // element.  The other shuffle indices must be the successive elements after
5744   // the first one.
5745   unsigned ExpectedElt = Imm;
5746   for (unsigned i = 1; i < NumElts; ++i) {
5747     // Increment the expected index.  If it wraps around, it may still be
5748     // a VEXT but the source vectors must be swapped.
5749     ExpectedElt += 1;
5750     if (ExpectedElt == NumElts * 2) {
5751       ExpectedElt = 0;
5752       ReverseVEXT = true;
5753     }
5754 
5755     if (M[i] < 0) continue; // ignore UNDEF indices
5756     if (ExpectedElt != static_cast<unsigned>(M[i]))
5757       return false;
5758   }
5759 
5760   // Adjust the index value if the source operands will be swapped.
5761   if (ReverseVEXT)
5762     Imm -= NumElts;
5763 
5764   return true;
5765 }
5766 
5767 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
5768 /// instruction with the specified blocksize.  (The order of the elements
5769 /// within each block of the vector is reversed.)
5770 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
5771   assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
5772          "Only possible block sizes for VREV are: 16, 32, 64");
5773 
5774   unsigned EltSz = VT.getScalarSizeInBits();
5775   if (EltSz == 64)
5776     return false;
5777 
5778   unsigned NumElts = VT.getVectorNumElements();
5779   unsigned BlockElts = M[0] + 1;
5780   // If the first shuffle index is UNDEF, be optimistic.
5781   if (M[0] < 0)
5782     BlockElts = BlockSize / EltSz;
5783 
5784   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
5785     return false;
5786 
5787   for (unsigned i = 0; i < NumElts; ++i) {
5788     if (M[i] < 0) continue; // ignore UNDEF indices
5789     if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
5790       return false;
5791   }
5792 
5793   return true;
5794 }
5795 
5796 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
5797   // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
5798   // range, then 0 is placed into the resulting vector. So pretty much any mask
5799   // of 8 elements can work here.
5800   return VT == MVT::v8i8 && M.size() == 8;
5801 }
5802 
5803 // Checks whether the shuffle mask represents a vector transpose (VTRN) by
5804 // checking that pairs of elements in the shuffle mask represent the same index
5805 // in each vector, incrementing the expected index by 2 at each step.
5806 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
5807 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
5808 //  v2={e,f,g,h}
5809 // WhichResult gives the offset for each element in the mask based on which
5810 // of the two results it belongs to.
5811 //
5812 // The transpose can be represented either as:
5813 // result1 = shufflevector v1, v2, result1_shuffle_mask
5814 // result2 = shufflevector v1, v2, result2_shuffle_mask
5815 // where v1/v2 and the shuffle masks have the same number of elements
5816 // (here WhichResult (see below) indicates which result is being checked)
5817 //
5818 // or as:
5819 // results = shufflevector v1, v2, shuffle_mask
5820 // where both results are returned in one vector and the shuffle mask has twice
5821 // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we
5822 // want to check the low half and high half of the shuffle mask as if it were
5823 // the other case
5824 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5825   unsigned EltSz = VT.getScalarSizeInBits();
5826   if (EltSz == 64)
5827     return false;
5828 
5829   unsigned NumElts = VT.getVectorNumElements();
5830   if (M.size() != NumElts && M.size() != NumElts*2)
5831     return false;
5832 
5833   // If the mask is twice as long as the input vector then we need to check the
5834   // upper and lower parts of the mask with a matching value for WhichResult
5835   // FIXME: A mask with only even values will be rejected in case the first
5836   // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
5837   // M[0] is used to determine WhichResult
5838   for (unsigned i = 0; i < M.size(); i += NumElts) {
5839     if (M.size() == NumElts * 2)
5840       WhichResult = i / NumElts;
5841     else
5842       WhichResult = M[i] == 0 ? 0 : 1;
5843     for (unsigned j = 0; j < NumElts; j += 2) {
5844       if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
5845           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult))
5846         return false;
5847     }
5848   }
5849 
5850   if (M.size() == NumElts*2)
5851     WhichResult = 0;
5852 
5853   return true;
5854 }
5855 
5856 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
5857 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
5858 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
5859 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
5860   unsigned EltSz = VT.getScalarSizeInBits();
5861   if (EltSz == 64)
5862     return false;
5863 
5864   unsigned NumElts = VT.getVectorNumElements();
5865   if (M.size() != NumElts && M.size() != NumElts*2)
5866     return false;
5867 
5868   for (unsigned i = 0; i < M.size(); i += NumElts) {
5869     if (M.size() == NumElts * 2)
5870       WhichResult = i / NumElts;
5871     else
5872       WhichResult = M[i] == 0 ? 0 : 1;
5873     for (unsigned j = 0; j < NumElts; j += 2) {
5874       if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
5875           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult))
5876         return false;
5877     }
5878   }
5879 
5880   if (M.size() == NumElts*2)
5881     WhichResult = 0;
5882 
5883   return true;
5884 }
5885 
5886 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
5887 // that the mask elements are either all even and in steps of size 2 or all odd
5888 // and in steps of size 2.
5889 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
5890 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
5891 //  v2={e,f,g,h}
5892 // Requires similar checks to that of isVTRNMask with
5893 // respect the how results are returned.
5894 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5895   unsigned EltSz = VT.getScalarSizeInBits();
5896   if (EltSz == 64)
5897     return false;
5898 
5899   unsigned NumElts = VT.getVectorNumElements();
5900   if (M.size() != NumElts && M.size() != NumElts*2)
5901     return false;
5902 
5903   for (unsigned i = 0; i < M.size(); i += NumElts) {
5904     WhichResult = M[i] == 0 ? 0 : 1;
5905     for (unsigned j = 0; j < NumElts; ++j) {
5906       if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
5907         return false;
5908     }
5909   }
5910 
5911   if (M.size() == NumElts*2)
5912     WhichResult = 0;
5913 
5914   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5915   if (VT.is64BitVector() && EltSz == 32)
5916     return false;
5917 
5918   return true;
5919 }
5920 
5921 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
5922 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
5923 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
5924 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
5925   unsigned EltSz = VT.getScalarSizeInBits();
5926   if (EltSz == 64)
5927     return false;
5928 
5929   unsigned NumElts = VT.getVectorNumElements();
5930   if (M.size() != NumElts && M.size() != NumElts*2)
5931     return false;
5932 
5933   unsigned Half = NumElts / 2;
5934   for (unsigned i = 0; i < M.size(); i += NumElts) {
5935     WhichResult = M[i] == 0 ? 0 : 1;
5936     for (unsigned j = 0; j < NumElts; j += Half) {
5937       unsigned Idx = WhichResult;
5938       for (unsigned k = 0; k < Half; ++k) {
5939         int MIdx = M[i + j + k];
5940         if (MIdx >= 0 && (unsigned) MIdx != Idx)
5941           return false;
5942         Idx += 2;
5943       }
5944     }
5945   }
5946 
5947   if (M.size() == NumElts*2)
5948     WhichResult = 0;
5949 
5950   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5951   if (VT.is64BitVector() && EltSz == 32)
5952     return false;
5953 
5954   return true;
5955 }
5956 
5957 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking
5958 // that pairs of elements of the shufflemask represent the same index in each
5959 // vector incrementing sequentially through the vectors.
5960 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
5961 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
5962 //  v2={e,f,g,h}
5963 // Requires similar checks to that of isVTRNMask with respect the how results
5964 // are returned.
5965 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5966   unsigned EltSz = VT.getScalarSizeInBits();
5967   if (EltSz == 64)
5968     return false;
5969 
5970   unsigned NumElts = VT.getVectorNumElements();
5971   if (M.size() != NumElts && M.size() != NumElts*2)
5972     return false;
5973 
5974   for (unsigned i = 0; i < M.size(); i += NumElts) {
5975     WhichResult = M[i] == 0 ? 0 : 1;
5976     unsigned Idx = WhichResult * NumElts / 2;
5977     for (unsigned j = 0; j < NumElts; j += 2) {
5978       if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
5979           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
5980         return false;
5981       Idx += 1;
5982     }
5983   }
5984 
5985   if (M.size() == NumElts*2)
5986     WhichResult = 0;
5987 
5988   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5989   if (VT.is64BitVector() && EltSz == 32)
5990     return false;
5991 
5992   return true;
5993 }
5994 
5995 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
5996 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
5997 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
5998 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
5999   unsigned EltSz = VT.getScalarSizeInBits();
6000   if (EltSz == 64)
6001     return false;
6002 
6003   unsigned NumElts = VT.getVectorNumElements();
6004   if (M.size() != NumElts && M.size() != NumElts*2)
6005     return false;
6006 
6007   for (unsigned i = 0; i < M.size(); i += NumElts) {
6008     WhichResult = M[i] == 0 ? 0 : 1;
6009     unsigned Idx = WhichResult * NumElts / 2;
6010     for (unsigned j = 0; j < NumElts; j += 2) {
6011       if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
6012           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
6013         return false;
6014       Idx += 1;
6015     }
6016   }
6017 
6018   if (M.size() == NumElts*2)
6019     WhichResult = 0;
6020 
6021   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
6022   if (VT.is64BitVector() && EltSz == 32)
6023     return false;
6024 
6025   return true;
6026 }
6027 
6028 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
6029 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
6030 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
6031                                            unsigned &WhichResult,
6032                                            bool &isV_UNDEF) {
6033   isV_UNDEF = false;
6034   if (isVTRNMask(ShuffleMask, VT, WhichResult))
6035     return ARMISD::VTRN;
6036   if (isVUZPMask(ShuffleMask, VT, WhichResult))
6037     return ARMISD::VUZP;
6038   if (isVZIPMask(ShuffleMask, VT, WhichResult))
6039     return ARMISD::VZIP;
6040 
6041   isV_UNDEF = true;
6042   if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
6043     return ARMISD::VTRN;
6044   if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
6045     return ARMISD::VUZP;
6046   if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
6047     return ARMISD::VZIP;
6048 
6049   return 0;
6050 }
6051 
6052 /// \return true if this is a reverse operation on an vector.
6053 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
6054   unsigned NumElts = VT.getVectorNumElements();
6055   // Make sure the mask has the right size.
6056   if (NumElts != M.size())
6057       return false;
6058 
6059   // Look for <15, ..., 3, -1, 1, 0>.
6060   for (unsigned i = 0; i != NumElts; ++i)
6061     if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
6062       return false;
6063 
6064   return true;
6065 }
6066 
6067 // If N is an integer constant that can be moved into a register in one
6068 // instruction, return an SDValue of such a constant (will become a MOV
6069 // instruction).  Otherwise return null.
6070 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
6071                                      const ARMSubtarget *ST, const SDLoc &dl) {
6072   uint64_t Val;
6073   if (!isa<ConstantSDNode>(N))
6074     return SDValue();
6075   Val = cast<ConstantSDNode>(N)->getZExtValue();
6076 
6077   if (ST->isThumb1Only()) {
6078     if (Val <= 255 || ~Val <= 255)
6079       return DAG.getConstant(Val, dl, MVT::i32);
6080   } else {
6081     if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
6082       return DAG.getConstant(Val, dl, MVT::i32);
6083   }
6084   return SDValue();
6085 }
6086 
6087 // If this is a case we can't handle, return null and let the default
6088 // expansion code take care of it.
6089 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
6090                                              const ARMSubtarget *ST) const {
6091   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
6092   SDLoc dl(Op);
6093   EVT VT = Op.getValueType();
6094 
6095   APInt SplatBits, SplatUndef;
6096   unsigned SplatBitSize;
6097   bool HasAnyUndefs;
6098   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
6099     if (SplatUndef.isAllOnesValue())
6100       return DAG.getUNDEF(VT);
6101 
6102     if (SplatBitSize <= 64) {
6103       // Check if an immediate VMOV works.
6104       EVT VmovVT;
6105       SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
6106                                       SplatUndef.getZExtValue(), SplatBitSize,
6107                                       DAG, dl, VmovVT, VT.is128BitVector(),
6108                                       VMOVModImm);
6109       if (Val.getNode()) {
6110         SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
6111         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
6112       }
6113 
6114       // Try an immediate VMVN.
6115       uint64_t NegatedImm = (~SplatBits).getZExtValue();
6116       Val = isNEONModifiedImm(NegatedImm,
6117                                       SplatUndef.getZExtValue(), SplatBitSize,
6118                                       DAG, dl, VmovVT, VT.is128BitVector(),
6119                                       VMVNModImm);
6120       if (Val.getNode()) {
6121         SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
6122         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
6123       }
6124 
6125       // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
6126       if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
6127         int ImmVal = ARM_AM::getFP32Imm(SplatBits);
6128         if (ImmVal != -1) {
6129           SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
6130           return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
6131         }
6132       }
6133     }
6134   }
6135 
6136   // Scan through the operands to see if only one value is used.
6137   //
6138   // As an optimisation, even if more than one value is used it may be more
6139   // profitable to splat with one value then change some lanes.
6140   //
6141   // Heuristically we decide to do this if the vector has a "dominant" value,
6142   // defined as splatted to more than half of the lanes.
6143   unsigned NumElts = VT.getVectorNumElements();
6144   bool isOnlyLowElement = true;
6145   bool usesOnlyOneValue = true;
6146   bool hasDominantValue = false;
6147   bool isConstant = true;
6148 
6149   // Map of the number of times a particular SDValue appears in the
6150   // element list.
6151   DenseMap<SDValue, unsigned> ValueCounts;
6152   SDValue Value;
6153   for (unsigned i = 0; i < NumElts; ++i) {
6154     SDValue V = Op.getOperand(i);
6155     if (V.isUndef())
6156       continue;
6157     if (i > 0)
6158       isOnlyLowElement = false;
6159     if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
6160       isConstant = false;
6161 
6162     ValueCounts.insert(std::make_pair(V, 0));
6163     unsigned &Count = ValueCounts[V];
6164 
6165     // Is this value dominant? (takes up more than half of the lanes)
6166     if (++Count > (NumElts / 2)) {
6167       hasDominantValue = true;
6168       Value = V;
6169     }
6170   }
6171   if (ValueCounts.size() != 1)
6172     usesOnlyOneValue = false;
6173   if (!Value.getNode() && !ValueCounts.empty())
6174     Value = ValueCounts.begin()->first;
6175 
6176   if (ValueCounts.empty())
6177     return DAG.getUNDEF(VT);
6178 
6179   // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
6180   // Keep going if we are hitting this case.
6181   if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
6182     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
6183 
6184   unsigned EltSize = VT.getScalarSizeInBits();
6185 
6186   // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
6187   // i32 and try again.
6188   if (hasDominantValue && EltSize <= 32) {
6189     if (!isConstant) {
6190       SDValue N;
6191 
6192       // If we are VDUPing a value that comes directly from a vector, that will
6193       // cause an unnecessary move to and from a GPR, where instead we could
6194       // just use VDUPLANE. We can only do this if the lane being extracted
6195       // is at a constant index, as the VDUP from lane instructions only have
6196       // constant-index forms.
6197       ConstantSDNode *constIndex;
6198       if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6199           (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
6200         // We need to create a new undef vector to use for the VDUPLANE if the
6201         // size of the vector from which we get the value is different than the
6202         // size of the vector that we need to create. We will insert the element
6203         // such that the register coalescer will remove unnecessary copies.
6204         if (VT != Value->getOperand(0).getValueType()) {
6205           unsigned index = constIndex->getAPIntValue().getLimitedValue() %
6206                              VT.getVectorNumElements();
6207           N =  DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6208                  DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
6209                         Value, DAG.getConstant(index, dl, MVT::i32)),
6210                            DAG.getConstant(index, dl, MVT::i32));
6211         } else
6212           N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6213                         Value->getOperand(0), Value->getOperand(1));
6214       } else
6215         N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
6216 
6217       if (!usesOnlyOneValue) {
6218         // The dominant value was splatted as 'N', but we now have to insert
6219         // all differing elements.
6220         for (unsigned I = 0; I < NumElts; ++I) {
6221           if (Op.getOperand(I) == Value)
6222             continue;
6223           SmallVector<SDValue, 3> Ops;
6224           Ops.push_back(N);
6225           Ops.push_back(Op.getOperand(I));
6226           Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
6227           N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
6228         }
6229       }
6230       return N;
6231     }
6232     if (VT.getVectorElementType().isFloatingPoint()) {
6233       SmallVector<SDValue, 8> Ops;
6234       for (unsigned i = 0; i < NumElts; ++i)
6235         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
6236                                   Op.getOperand(i)));
6237       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
6238       SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
6239       Val = LowerBUILD_VECTOR(Val, DAG, ST);
6240       if (Val.getNode())
6241         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6242     }
6243     if (usesOnlyOneValue) {
6244       SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
6245       if (isConstant && Val.getNode())
6246         return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
6247     }
6248   }
6249 
6250   // If all elements are constants and the case above didn't get hit, fall back
6251   // to the default expansion, which will generate a load from the constant
6252   // pool.
6253   if (isConstant)
6254     return SDValue();
6255 
6256   // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
6257   if (NumElts >= 4) {
6258     SDValue shuffle = ReconstructShuffle(Op, DAG);
6259     if (shuffle != SDValue())
6260       return shuffle;
6261   }
6262 
6263   if (VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) {
6264     // If we haven't found an efficient lowering, try splitting a 128-bit vector
6265     // into two 64-bit vectors; we might discover a better way to lower it.
6266     SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts);
6267     EVT ExtVT = VT.getVectorElementType();
6268     EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2);
6269     SDValue Lower =
6270         DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2));
6271     if (Lower.getOpcode() == ISD::BUILD_VECTOR)
6272       Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
6273     SDValue Upper = DAG.getBuildVector(
6274         HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
6275     if (Upper.getOpcode() == ISD::BUILD_VECTOR)
6276       Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
6277     if (Lower && Upper)
6278       return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper);
6279   }
6280 
6281   // Vectors with 32- or 64-bit elements can be built by directly assigning
6282   // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
6283   // will be legalized.
6284   if (EltSize >= 32) {
6285     // Do the expansion with floating-point types, since that is what the VFP
6286     // registers are defined to use, and since i64 is not legal.
6287     EVT EltVT = EVT::getFloatingPointVT(EltSize);
6288     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
6289     SmallVector<SDValue, 8> Ops;
6290     for (unsigned i = 0; i < NumElts; ++i)
6291       Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
6292     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
6293     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6294   }
6295 
6296   // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
6297   // know the default expansion would otherwise fall back on something even
6298   // worse. For a vector with one or two non-undef values, that's
6299   // scalar_to_vector for the elements followed by a shuffle (provided the
6300   // shuffle is valid for the target) and materialization element by element
6301   // on the stack followed by a load for everything else.
6302   if (!isConstant && !usesOnlyOneValue) {
6303     SDValue Vec = DAG.getUNDEF(VT);
6304     for (unsigned i = 0 ; i < NumElts; ++i) {
6305       SDValue V = Op.getOperand(i);
6306       if (V.isUndef())
6307         continue;
6308       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
6309       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
6310     }
6311     return Vec;
6312   }
6313 
6314   return SDValue();
6315 }
6316 
6317 // Gather data to see if the operation can be modelled as a
6318 // shuffle in combination with VEXTs.
6319 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
6320                                               SelectionDAG &DAG) const {
6321   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
6322   SDLoc dl(Op);
6323   EVT VT = Op.getValueType();
6324   unsigned NumElts = VT.getVectorNumElements();
6325 
6326   struct ShuffleSourceInfo {
6327     SDValue Vec;
6328     unsigned MinElt = std::numeric_limits<unsigned>::max();
6329     unsigned MaxElt = 0;
6330 
6331     // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
6332     // be compatible with the shuffle we intend to construct. As a result
6333     // ShuffleVec will be some sliding window into the original Vec.
6334     SDValue ShuffleVec;
6335 
6336     // Code should guarantee that element i in Vec starts at element "WindowBase
6337     // + i * WindowScale in ShuffleVec".
6338     int WindowBase = 0;
6339     int WindowScale = 1;
6340 
6341     ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}
6342 
6343     bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
6344   };
6345 
6346   // First gather all vectors used as an immediate source for this BUILD_VECTOR
6347   // node.
6348   SmallVector<ShuffleSourceInfo, 2> Sources;
6349   for (unsigned i = 0; i < NumElts; ++i) {
6350     SDValue V = Op.getOperand(i);
6351     if (V.isUndef())
6352       continue;
6353     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
6354       // A shuffle can only come from building a vector from various
6355       // elements of other vectors.
6356       return SDValue();
6357     } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
6358       // Furthermore, shuffles require a constant mask, whereas extractelts
6359       // accept variable indices.
6360       return SDValue();
6361     }
6362 
6363     // Add this element source to the list if it's not already there.
6364     SDValue SourceVec = V.getOperand(0);
6365     auto Source = llvm::find(Sources, SourceVec);
6366     if (Source == Sources.end())
6367       Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
6368 
6369     // Update the minimum and maximum lane number seen.
6370     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
6371     Source->MinElt = std::min(Source->MinElt, EltNo);
6372     Source->MaxElt = std::max(Source->MaxElt, EltNo);
6373   }
6374 
6375   // Currently only do something sane when at most two source vectors
6376   // are involved.
6377   if (Sources.size() > 2)
6378     return SDValue();
6379 
6380   // Find out the smallest element size among result and two sources, and use
6381   // it as element size to build the shuffle_vector.
6382   EVT SmallestEltTy = VT.getVectorElementType();
6383   for (auto &Source : Sources) {
6384     EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
6385     if (SrcEltTy.bitsLT(SmallestEltTy))
6386       SmallestEltTy = SrcEltTy;
6387   }
6388   unsigned ResMultiplier =
6389       VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
6390   NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
6391   EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
6392 
6393   // If the source vector is too wide or too narrow, we may nevertheless be able
6394   // to construct a compatible shuffle either by concatenating it with UNDEF or
6395   // extracting a suitable range of elements.
6396   for (auto &Src : Sources) {
6397     EVT SrcVT = Src.ShuffleVec.getValueType();
6398 
6399     if (SrcVT.getSizeInBits() == VT.getSizeInBits())
6400       continue;
6401 
6402     // This stage of the search produces a source with the same element type as
6403     // the original, but with a total width matching the BUILD_VECTOR output.
6404     EVT EltVT = SrcVT.getVectorElementType();
6405     unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
6406     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
6407 
6408     if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
6409       if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
6410         return SDValue();
6411       // We can pad out the smaller vector for free, so if it's part of a
6412       // shuffle...
6413       Src.ShuffleVec =
6414           DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
6415                       DAG.getUNDEF(Src.ShuffleVec.getValueType()));
6416       continue;
6417     }
6418 
6419     if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
6420       return SDValue();
6421 
6422     if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
6423       // Span too large for a VEXT to cope
6424       return SDValue();
6425     }
6426 
6427     if (Src.MinElt >= NumSrcElts) {
6428       // The extraction can just take the second half
6429       Src.ShuffleVec =
6430           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6431                       DAG.getConstant(NumSrcElts, dl, MVT::i32));
6432       Src.WindowBase = -NumSrcElts;
6433     } else if (Src.MaxElt < NumSrcElts) {
6434       // The extraction can just take the first half
6435       Src.ShuffleVec =
6436           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6437                       DAG.getConstant(0, dl, MVT::i32));
6438     } else {
6439       // An actual VEXT is needed
6440       SDValue VEXTSrc1 =
6441           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6442                       DAG.getConstant(0, dl, MVT::i32));
6443       SDValue VEXTSrc2 =
6444           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6445                       DAG.getConstant(NumSrcElts, dl, MVT::i32));
6446 
6447       Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
6448                                    VEXTSrc2,
6449                                    DAG.getConstant(Src.MinElt, dl, MVT::i32));
6450       Src.WindowBase = -Src.MinElt;
6451     }
6452   }
6453 
6454   // Another possible incompatibility occurs from the vector element types. We
6455   // can fix this by bitcasting the source vectors to the same type we intend
6456   // for the shuffle.
6457   for (auto &Src : Sources) {
6458     EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
6459     if (SrcEltTy == SmallestEltTy)
6460       continue;
6461     assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
6462     Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
6463     Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
6464     Src.WindowBase *= Src.WindowScale;
6465   }
6466 
6467   // Final sanity check before we try to actually produce a shuffle.
6468   DEBUG(
6469     for (auto Src : Sources)
6470       assert(Src.ShuffleVec.getValueType() == ShuffleVT);
6471   );
6472 
6473   // The stars all align, our next step is to produce the mask for the shuffle.
6474   SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
6475   int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
6476   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
6477     SDValue Entry = Op.getOperand(i);
6478     if (Entry.isUndef())
6479       continue;
6480 
6481     auto Src = llvm::find(Sources, Entry.getOperand(0));
6482     int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
6483 
6484     // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
6485     // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
6486     // segment.
6487     EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
6488     int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
6489                                VT.getScalarSizeInBits());
6490     int LanesDefined = BitsDefined / BitsPerShuffleLane;
6491 
6492     // This source is expected to fill ResMultiplier lanes of the final shuffle,
6493     // starting at the appropriate offset.
6494     int *LaneMask = &Mask[i * ResMultiplier];
6495 
6496     int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
6497     ExtractBase += NumElts * (Src - Sources.begin());
6498     for (int j = 0; j < LanesDefined; ++j)
6499       LaneMask[j] = ExtractBase + j;
6500   }
6501 
6502   // Final check before we try to produce nonsense...
6503   if (!isShuffleMaskLegal(Mask, ShuffleVT))
6504     return SDValue();
6505 
6506   // We can't handle more than two sources. This should have already
6507   // been checked before this point.
6508   assert(Sources.size() <= 2 && "Too many sources!");
6509 
6510   SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
6511   for (unsigned i = 0; i < Sources.size(); ++i)
6512     ShuffleOps[i] = Sources[i].ShuffleVec;
6513 
6514   SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
6515                                          ShuffleOps[1], Mask);
6516   return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
6517 }
6518 
6519 /// isShuffleMaskLegal - Targets can use this to indicate that they only
6520 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
6521 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
6522 /// are assumed to be legal.
6523 bool
6524 ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
6525                                       EVT VT) const {
6526   if (VT.getVectorNumElements() == 4 &&
6527       (VT.is128BitVector() || VT.is64BitVector())) {
6528     unsigned PFIndexes[4];
6529     for (unsigned i = 0; i != 4; ++i) {
6530       if (M[i] < 0)
6531         PFIndexes[i] = 8;
6532       else
6533         PFIndexes[i] = M[i];
6534     }
6535 
6536     // Compute the index in the perfect shuffle table.
6537     unsigned PFTableIndex =
6538       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
6539     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
6540     unsigned Cost = (PFEntry >> 30);
6541 
6542     if (Cost <= 4)
6543       return true;
6544   }
6545 
6546   bool ReverseVEXT, isV_UNDEF;
6547   unsigned Imm, WhichResult;
6548 
6549   unsigned EltSize = VT.getScalarSizeInBits();
6550   return (EltSize >= 32 ||
6551           ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
6552           isVREVMask(M, VT, 64) ||
6553           isVREVMask(M, VT, 32) ||
6554           isVREVMask(M, VT, 16) ||
6555           isVEXTMask(M, VT, ReverseVEXT, Imm) ||
6556           isVTBLMask(M, VT) ||
6557           isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF) ||
6558           ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT)));
6559 }
6560 
6561 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
6562 /// the specified operations to build the shuffle.
6563 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
6564                                       SDValue RHS, SelectionDAG &DAG,
6565                                       const SDLoc &dl) {
6566   unsigned OpNum = (PFEntry >> 26) & 0x0F;
6567   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
6568   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
6569 
6570   enum {
6571     OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
6572     OP_VREV,
6573     OP_VDUP0,
6574     OP_VDUP1,
6575     OP_VDUP2,
6576     OP_VDUP3,
6577     OP_VEXT1,
6578     OP_VEXT2,
6579     OP_VEXT3,
6580     OP_VUZPL, // VUZP, left result
6581     OP_VUZPR, // VUZP, right result
6582     OP_VZIPL, // VZIP, left result
6583     OP_VZIPR, // VZIP, right result
6584     OP_VTRNL, // VTRN, left result
6585     OP_VTRNR  // VTRN, right result
6586   };
6587 
6588   if (OpNum == OP_COPY) {
6589     if (LHSID == (1*9+2)*9+3) return LHS;
6590     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
6591     return RHS;
6592   }
6593 
6594   SDValue OpLHS, OpRHS;
6595   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
6596   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
6597   EVT VT = OpLHS.getValueType();
6598 
6599   switch (OpNum) {
6600   default: llvm_unreachable("Unknown shuffle opcode!");
6601   case OP_VREV:
6602     // VREV divides the vector in half and swaps within the half.
6603     if (VT.getVectorElementType() == MVT::i32 ||
6604         VT.getVectorElementType() == MVT::f32)
6605       return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
6606     // vrev <4 x i16> -> VREV32
6607     if (VT.getVectorElementType() == MVT::i16)
6608       return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
6609     // vrev <4 x i8> -> VREV16
6610     assert(VT.getVectorElementType() == MVT::i8);
6611     return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
6612   case OP_VDUP0:
6613   case OP_VDUP1:
6614   case OP_VDUP2:
6615   case OP_VDUP3:
6616     return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6617                        OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
6618   case OP_VEXT1:
6619   case OP_VEXT2:
6620   case OP_VEXT3:
6621     return DAG.getNode(ARMISD::VEXT, dl, VT,
6622                        OpLHS, OpRHS,
6623                        DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
6624   case OP_VUZPL:
6625   case OP_VUZPR:
6626     return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
6627                        OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
6628   case OP_VZIPL:
6629   case OP_VZIPR:
6630     return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
6631                        OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
6632   case OP_VTRNL:
6633   case OP_VTRNR:
6634     return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
6635                        OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
6636   }
6637 }
6638 
6639 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
6640                                        ArrayRef<int> ShuffleMask,
6641                                        SelectionDAG &DAG) {
6642   // Check to see if we can use the VTBL instruction.
6643   SDValue V1 = Op.getOperand(0);
6644   SDValue V2 = Op.getOperand(1);
6645   SDLoc DL(Op);
6646 
6647   SmallVector<SDValue, 8> VTBLMask;
6648   for (ArrayRef<int>::iterator
6649          I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
6650     VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));
6651 
6652   if (V2.getNode()->isUndef())
6653     return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
6654                        DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
6655 
6656   return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
6657                      DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
6658 }
6659 
6660 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
6661                                                       SelectionDAG &DAG) {
6662   SDLoc DL(Op);
6663   SDValue OpLHS = Op.getOperand(0);
6664   EVT VT = OpLHS.getValueType();
6665 
6666   assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
6667          "Expect an v8i16/v16i8 type");
6668   OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
6669   // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now,
6670   // extract the first 8 bytes into the top double word and the last 8 bytes
6671   // into the bottom double word. The v8i16 case is similar.
6672   unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
6673   return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
6674                      DAG.getConstant(ExtractNum, DL, MVT::i32));
6675 }
6676 
6677 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
6678   SDValue V1 = Op.getOperand(0);
6679   SDValue V2 = Op.getOperand(1);
6680   SDLoc dl(Op);
6681   EVT VT = Op.getValueType();
6682   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
6683 
6684   // Convert shuffles that are directly supported on NEON to target-specific
6685   // DAG nodes, instead of keeping them as shuffles and matching them again
6686   // during code selection.  This is more efficient and avoids the possibility
6687   // of inconsistencies between legalization and selection.
6688   // FIXME: floating-point vectors should be canonicalized to integer vectors
6689   // of the same time so that they get CSEd properly.
6690   ArrayRef<int> ShuffleMask = SVN->getMask();
6691 
6692   unsigned EltSize = VT.getScalarSizeInBits();
6693   if (EltSize <= 32) {
6694     if (SVN->isSplat()) {
6695       int Lane = SVN->getSplatIndex();
6696       // If this is undef splat, generate it via "just" vdup, if possible.
6697       if (Lane == -1) Lane = 0;
6698 
6699       // Test if V1 is a SCALAR_TO_VECTOR.
6700       if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
6701         return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
6702       }
6703       // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
6704       // (and probably will turn into a SCALAR_TO_VECTOR once legalization
6705       // reaches it).
6706       if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
6707           !isa<ConstantSDNode>(V1.getOperand(0))) {
6708         bool IsScalarToVector = true;
6709         for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
6710           if (!V1.getOperand(i).isUndef()) {
6711             IsScalarToVector = false;
6712             break;
6713           }
6714         if (IsScalarToVector)
6715           return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
6716       }
6717       return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
6718                          DAG.getConstant(Lane, dl, MVT::i32));
6719     }
6720 
6721     bool ReverseVEXT;
6722     unsigned Imm;
6723     if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
6724       if (ReverseVEXT)
6725         std::swap(V1, V2);
6726       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
6727                          DAG.getConstant(Imm, dl, MVT::i32));
6728     }
6729 
6730     if (isVREVMask(ShuffleMask, VT, 64))
6731       return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
6732     if (isVREVMask(ShuffleMask, VT, 32))
6733       return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
6734     if (isVREVMask(ShuffleMask, VT, 16))
6735       return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
6736 
6737     if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
6738       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
6739                          DAG.getConstant(Imm, dl, MVT::i32));
6740     }
6741 
6742     // Check for Neon shuffles that modify both input vectors in place.
6743     // If both results are used, i.e., if there are two shuffles with the same
6744     // source operands and with masks corresponding to both results of one of
6745     // these operations, DAG memoization will ensure that a single node is
6746     // used for both shuffles.
6747     unsigned WhichResult;
6748     bool isV_UNDEF;
6749     if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
6750             ShuffleMask, VT, WhichResult, isV_UNDEF)) {
6751       if (isV_UNDEF)
6752         V2 = V1;
6753       return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
6754           .getValue(WhichResult);
6755     }
6756 
6757     // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
6758     // shuffles that produce a result larger than their operands with:
6759     //   shuffle(concat(v1, undef), concat(v2, undef))
6760     // ->
6761     //   shuffle(concat(v1, v2), undef)
6762     // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
6763     //
6764     // This is useful in the general case, but there are special cases where
6765     // native shuffles produce larger results: the two-result ops.
6766     //
6767     // Look through the concat when lowering them:
6768     //   shuffle(concat(v1, v2), undef)
6769     // ->
6770     //   concat(VZIP(v1, v2):0, :1)
6771     //
6772     if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
6773       SDValue SubV1 = V1->getOperand(0);
6774       SDValue SubV2 = V1->getOperand(1);
6775       EVT SubVT = SubV1.getValueType();
6776 
6777       // We expect these to have been canonicalized to -1.
6778       assert(llvm::all_of(ShuffleMask, [&](int i) {
6779         return i < (int)VT.getVectorNumElements();
6780       }) && "Unexpected shuffle index into UNDEF operand!");
6781 
6782       if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
6783               ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
6784         if (isV_UNDEF)
6785           SubV2 = SubV1;
6786         assert((WhichResult == 0) &&
6787                "In-place shuffle of concat can only have one result!");
6788         SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
6789                                   SubV1, SubV2);
6790         return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
6791                            Res.getValue(1));
6792       }
6793     }
6794   }
6795 
6796   // If the shuffle is not directly supported and it has 4 elements, use
6797   // the PerfectShuffle-generated table to synthesize it from other shuffles.
6798   unsigned NumElts = VT.getVectorNumElements();
6799   if (NumElts == 4) {
6800     unsigned PFIndexes[4];
6801     for (unsigned i = 0; i != 4; ++i) {
6802       if (ShuffleMask[i] < 0)
6803         PFIndexes[i] = 8;
6804       else
6805         PFIndexes[i] = ShuffleMask[i];
6806     }
6807 
6808     // Compute the index in the perfect shuffle table.
6809     unsigned PFTableIndex =
6810       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
6811     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
6812     unsigned Cost = (PFEntry >> 30);
6813 
6814     if (Cost <= 4)
6815       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
6816   }
6817 
6818   // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
6819   if (EltSize >= 32) {
6820     // Do the expansion with floating-point types, since that is what the VFP
6821     // registers are defined to use, and since i64 is not legal.
6822     EVT EltVT = EVT::getFloatingPointVT(EltSize);
6823     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
6824     V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
6825     V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
6826     SmallVector<SDValue, 8> Ops;
6827     for (unsigned i = 0; i < NumElts; ++i) {
6828       if (ShuffleMask[i] < 0)
6829         Ops.push_back(DAG.getUNDEF(EltVT));
6830       else
6831         Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
6832                                   ShuffleMask[i] < (int)NumElts ? V1 : V2,
6833                                   DAG.getConstant(ShuffleMask[i] & (NumElts-1),
6834                                                   dl, MVT::i32)));
6835     }
6836     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
6837     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6838   }
6839 
6840   if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
6841     return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);
6842 
6843   if (VT == MVT::v8i8)
6844     if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
6845       return NewOp;
6846 
6847   return SDValue();
6848 }
6849 
6850 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
6851   // INSERT_VECTOR_ELT is legal only for immediate indexes.
6852   SDValue Lane = Op.getOperand(2);
6853   if (!isa<ConstantSDNode>(Lane))
6854     return SDValue();
6855 
6856   return Op;
6857 }
6858 
6859 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
6860   // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
6861   SDValue Lane = Op.getOperand(1);
6862   if (!isa<ConstantSDNode>(Lane))
6863     return SDValue();
6864 
6865   SDValue Vec = Op.getOperand(0);
6866   if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
6867     SDLoc dl(Op);
6868     return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
6869   }
6870 
6871   return Op;
6872 }
6873 
6874 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
6875   // The only time a CONCAT_VECTORS operation can have legal types is when
6876   // two 64-bit vectors are concatenated to a 128-bit vector.
6877   assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
6878          "unexpected CONCAT_VECTORS");
6879   SDLoc dl(Op);
6880   SDValue Val = DAG.getUNDEF(MVT::v2f64);
6881   SDValue Op0 = Op.getOperand(0);
6882   SDValue Op1 = Op.getOperand(1);
6883   if (!Op0.isUndef())
6884     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
6885                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
6886                       DAG.getIntPtrConstant(0, dl));
6887   if (!Op1.isUndef())
6888     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
6889                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
6890                       DAG.getIntPtrConstant(1, dl));
6891   return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
6892 }
6893 
6894 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
6895 /// element has been zero/sign-extended, depending on the isSigned parameter,
6896 /// from an integer type half its size.
6897 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
6898                                    bool isSigned) {
6899   // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
6900   EVT VT = N->getValueType(0);
6901   if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
6902     SDNode *BVN = N->getOperand(0).getNode();
6903     if (BVN->getValueType(0) != MVT::v4i32 ||
6904         BVN->getOpcode() != ISD::BUILD_VECTOR)
6905       return false;
6906     unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
6907     unsigned HiElt = 1 - LoElt;
6908     ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
6909     ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
6910     ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
6911     ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
6912     if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
6913       return false;
6914     if (isSigned) {
6915       if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
6916           Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
6917         return true;
6918     } else {
6919       if (Hi0->isNullValue() && Hi1->isNullValue())
6920         return true;
6921     }
6922     return false;
6923   }
6924 
6925   if (N->getOpcode() != ISD::BUILD_VECTOR)
6926     return false;
6927 
6928   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
6929     SDNode *Elt = N->getOperand(i).getNode();
6930     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
6931       unsigned EltSize = VT.getScalarSizeInBits();
6932       unsigned HalfSize = EltSize / 2;
6933       if (isSigned) {
6934         if (!isIntN(HalfSize, C->getSExtValue()))
6935           return false;
6936       } else {
6937         if (!isUIntN(HalfSize, C->getZExtValue()))
6938           return false;
6939       }
6940       continue;
6941     }
6942     return false;
6943   }
6944 
6945   return true;
6946 }
6947 
6948 /// isSignExtended - Check if a node is a vector value that is sign-extended
6949 /// or a constant BUILD_VECTOR with sign-extended elements.
6950 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
6951   if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
6952     return true;
6953   if (isExtendedBUILD_VECTOR(N, DAG, true))
6954     return true;
6955   return false;
6956 }
6957 
6958 /// isZeroExtended - Check if a node is a vector value that is zero-extended
6959 /// or a constant BUILD_VECTOR with zero-extended elements.
6960 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
6961   if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
6962     return true;
6963   if (isExtendedBUILD_VECTOR(N, DAG, false))
6964     return true;
6965   return false;
6966 }
6967 
6968 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
6969   if (OrigVT.getSizeInBits() >= 64)
6970     return OrigVT;
6971 
6972   assert(OrigVT.isSimple() && "Expecting a simple value type");
6973 
6974   MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
6975   switch (OrigSimpleTy) {
6976   default: llvm_unreachable("Unexpected Vector Type");
6977   case MVT::v2i8:
6978   case MVT::v2i16:
6979      return MVT::v2i32;
6980   case MVT::v4i8:
6981     return  MVT::v4i16;
6982   }
6983 }
6984 
6985 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
6986 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
6987 /// We insert the required extension here to get the vector to fill a D register.
6988 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
6989                                             const EVT &OrigTy,
6990                                             const EVT &ExtTy,
6991                                             unsigned ExtOpcode) {
6992   // The vector originally had a size of OrigTy. It was then extended to ExtTy.
6993   // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
6994   // 64-bits we need to insert a new extension so that it will be 64-bits.
6995   assert(ExtTy.is128BitVector() && "Unexpected extension size");
6996   if (OrigTy.getSizeInBits() >= 64)
6997     return N;
6998 
6999   // Must extend size to at least 64 bits to be used as an operand for VMULL.
7000   EVT NewVT = getExtensionTo64Bits(OrigTy);
7001 
7002   return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
7003 }
7004 
7005 /// SkipLoadExtensionForVMULL - return a load of the original vector size that
7006 /// does not do any sign/zero extension. If the original vector is less
7007 /// than 64 bits, an appropriate extension will be added after the load to
7008 /// reach a total size of 64 bits. We have to add the extension separately
7009 /// because ARM does not have a sign/zero extending load for vectors.
7010 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
7011   EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());
7012 
7013   // The load already has the right type.
7014   if (ExtendedTy == LD->getMemoryVT())
7015     return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
7016                        LD->getBasePtr(), LD->getPointerInfo(),
7017                        LD->getAlignment(), LD->getMemOperand()->getFlags());
7018 
7019   // We need to create a zextload/sextload. We cannot just create a load
7020   // followed by a zext/zext node because LowerMUL is also run during normal
7021   // operation legalization where we can't create illegal types.
7022   return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
7023                         LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
7024                         LD->getMemoryVT(), LD->getAlignment(),
7025                         LD->getMemOperand()->getFlags());
7026 }
7027 
7028 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
7029 /// extending load, or BUILD_VECTOR with extended elements, return the
7030 /// unextended value. The unextended vector should be 64 bits so that it can
7031 /// be used as an operand to a VMULL instruction. If the original vector size
7032 /// before extension is less than 64 bits we add a an extension to resize
7033 /// the vector to 64 bits.
7034 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
7035   if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
7036     return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
7037                                         N->getOperand(0)->getValueType(0),
7038                                         N->getValueType(0),
7039                                         N->getOpcode());
7040 
7041   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
7042     assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
7043            "Expected extending load");
7044 
7045     SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG);
7046     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1));
7047     unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
7048     SDValue extLoad =
7049         DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad);
7050     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad);
7051 
7052     return newLoad;
7053   }
7054 
7055   // Otherwise, the value must be a BUILD_VECTOR.  For v2i64, it will
7056   // have been legalized as a BITCAST from v4i32.
7057   if (N->getOpcode() == ISD::BITCAST) {
7058     SDNode *BVN = N->getOperand(0).getNode();
7059     assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
7060            BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
7061     unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
7062     return DAG.getBuildVector(
7063         MVT::v2i32, SDLoc(N),
7064         {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
7065   }
7066   // Construct a new BUILD_VECTOR with elements truncated to half the size.
7067   assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
7068   EVT VT = N->getValueType(0);
7069   unsigned EltSize = VT.getScalarSizeInBits() / 2;
7070   unsigned NumElts = VT.getVectorNumElements();
7071   MVT TruncVT = MVT::getIntegerVT(EltSize);
7072   SmallVector<SDValue, 8> Ops;
7073   SDLoc dl(N);
7074   for (unsigned i = 0; i != NumElts; ++i) {
7075     ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
7076     const APInt &CInt = C->getAPIntValue();
7077     // Element types smaller than 32 bits are not legal, so use i32 elements.
7078     // The values are implicitly truncated so sext vs. zext doesn't matter.
7079     Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
7080   }
7081   return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
7082 }
7083 
7084 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
7085   unsigned Opcode = N->getOpcode();
7086   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
7087     SDNode *N0 = N->getOperand(0).getNode();
7088     SDNode *N1 = N->getOperand(1).getNode();
7089     return N0->hasOneUse() && N1->hasOneUse() &&
7090       isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
7091   }
7092   return false;
7093 }
7094 
7095 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
7096   unsigned Opcode = N->getOpcode();
7097   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
7098     SDNode *N0 = N->getOperand(0).getNode();
7099     SDNode *N1 = N->getOperand(1).getNode();
7100     return N0->hasOneUse() && N1->hasOneUse() &&
7101       isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
7102   }
7103   return false;
7104 }
7105 
7106 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
7107   // Multiplications are only custom-lowered for 128-bit vectors so that
7108   // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
7109   EVT VT = Op.getValueType();
7110   assert(VT.is128BitVector() && VT.isInteger() &&
7111          "unexpected type for custom-lowering ISD::MUL");
7112   SDNode *N0 = Op.getOperand(0).getNode();
7113   SDNode *N1 = Op.getOperand(1).getNode();
7114   unsigned NewOpc = 0;
7115   bool isMLA = false;
7116   bool isN0SExt = isSignExtended(N0, DAG);
7117   bool isN1SExt = isSignExtended(N1, DAG);
7118   if (isN0SExt && isN1SExt)
7119     NewOpc = ARMISD::VMULLs;
7120   else {
7121     bool isN0ZExt = isZeroExtended(N0, DAG);
7122     bool isN1ZExt = isZeroExtended(N1, DAG);
7123     if (isN0ZExt && isN1ZExt)
7124       NewOpc = ARMISD::VMULLu;
7125     else if (isN1SExt || isN1ZExt) {
7126       // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
7127       // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
7128       if (isN1SExt && isAddSubSExt(N0, DAG)) {
7129         NewOpc = ARMISD::VMULLs;
7130         isMLA = true;
7131       } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
7132         NewOpc = ARMISD::VMULLu;
7133         isMLA = true;
7134       } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
7135         std::swap(N0, N1);
7136         NewOpc = ARMISD::VMULLu;
7137         isMLA = true;
7138       }
7139     }
7140 
7141     if (!NewOpc) {
7142       if (VT == MVT::v2i64)
7143         // Fall through to expand this.  It is not legal.
7144         return SDValue();
7145       else
7146         // Other vector multiplications are legal.
7147         return Op;
7148     }
7149   }
7150 
7151   // Legalize to a VMULL instruction.
7152   SDLoc DL(Op);
7153   SDValue Op0;
7154   SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
7155   if (!isMLA) {
7156     Op0 = SkipExtensionForVMULL(N0, DAG);
7157     assert(Op0.getValueType().is64BitVector() &&
7158            Op1.getValueType().is64BitVector() &&
7159            "unexpected types for extended operands to VMULL");
7160     return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
7161   }
7162 
7163   // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during
7164   // isel lowering to take advantage of no-stall back to back vmul + vmla.
7165   //   vmull q0, d4, d6
7166   //   vmlal q0, d5, d6
7167   // is faster than
7168   //   vaddl q0, d4, d5
7169   //   vmovl q1, d6
7170   //   vmul  q0, q0, q1
7171   SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
7172   SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
7173   EVT Op1VT = Op1.getValueType();
7174   return DAG.getNode(N0->getOpcode(), DL, VT,
7175                      DAG.getNode(NewOpc, DL, VT,
7176                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
7177                      DAG.getNode(NewOpc, DL, VT,
7178                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
7179 }
7180 
7181 static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
7182                               SelectionDAG &DAG) {
7183   // TODO: Should this propagate fast-math-flags?
7184 
7185   // Convert to float
7186   // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
7187   // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
7188   X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
7189   Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
7190   X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
7191   Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
7192   // Get reciprocal estimate.
7193   // float4 recip = vrecpeq_f32(yf);
7194   Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7195                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7196                    Y);
7197   // Because char has a smaller range than uchar, we can actually get away
7198   // without any newton steps.  This requires that we use a weird bias
7199   // of 0xb000, however (again, this has been exhaustively tested).
7200   // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
7201   X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
7202   X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
7203   Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
7204   X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
7205   X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
7206   // Convert back to short.
7207   X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
7208   X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
7209   return X;
7210 }
7211 
7212 static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
7213                                SelectionDAG &DAG) {
7214   // TODO: Should this propagate fast-math-flags?
7215 
7216   SDValue N2;
7217   // Convert to float.
7218   // float4 yf = vcvt_f32_s32(vmovl_s16(y));
7219   // float4 xf = vcvt_f32_s32(vmovl_s16(x));
7220   N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
7221   N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
7222   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
7223   N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
7224 
7225   // Use reciprocal estimate and one refinement step.
7226   // float4 recip = vrecpeq_f32(yf);
7227   // recip *= vrecpsq_f32(yf, recip);
7228   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7229                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7230                    N1);
7231   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7232                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7233                    N1, N2);
7234   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7235   // Because short has a smaller range than ushort, we can actually get away
7236   // with only a single newton step.  This requires that we use a weird bias
7237   // of 89, however (again, this has been exhaustively tested).
7238   // float4 result = as_float4(as_int4(xf*recip) + 0x89);
7239   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
7240   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
7241   N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
7242   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
7243   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
7244   // Convert back to integer and return.
7245   // return vmovn_s32(vcvt_s32_f32(result));
7246   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
7247   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
7248   return N0;
7249 }
7250 
7251 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
7252   EVT VT = Op.getValueType();
7253   assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
7254          "unexpected type for custom-lowering ISD::SDIV");
7255 
7256   SDLoc dl(Op);
7257   SDValue N0 = Op.getOperand(0);
7258   SDValue N1 = Op.getOperand(1);
7259   SDValue N2, N3;
7260 
7261   if (VT == MVT::v8i8) {
7262     N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
7263     N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
7264 
7265     N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7266                      DAG.getIntPtrConstant(4, dl));
7267     N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7268                      DAG.getIntPtrConstant(4, dl));
7269     N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7270                      DAG.getIntPtrConstant(0, dl));
7271     N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7272                      DAG.getIntPtrConstant(0, dl));
7273 
7274     N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
7275     N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
7276 
7277     N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
7278     N0 = LowerCONCAT_VECTORS(N0, DAG);
7279 
7280     N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
7281     return N0;
7282   }
7283   return LowerSDIV_v4i16(N0, N1, dl, DAG);
7284 }
7285 
7286 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
7287   // TODO: Should this propagate fast-math-flags?
7288   EVT VT = Op.getValueType();
7289   assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
7290          "unexpected type for custom-lowering ISD::UDIV");
7291 
7292   SDLoc dl(Op);
7293   SDValue N0 = Op.getOperand(0);
7294   SDValue N1 = Op.getOperand(1);
7295   SDValue N2, N3;
7296 
7297   if (VT == MVT::v8i8) {
7298     N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
7299     N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
7300 
7301     N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7302                      DAG.getIntPtrConstant(4, dl));
7303     N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7304                      DAG.getIntPtrConstant(4, dl));
7305     N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7306                      DAG.getIntPtrConstant(0, dl));
7307     N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7308                      DAG.getIntPtrConstant(0, dl));
7309 
7310     N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
7311     N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
7312 
7313     N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
7314     N0 = LowerCONCAT_VECTORS(N0, DAG);
7315 
7316     N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
7317                      DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
7318                                      MVT::i32),
7319                      N0);
7320     return N0;
7321   }
7322 
7323   // v4i16 sdiv ... Convert to float.
7324   // float4 yf = vcvt_f32_s32(vmovl_u16(y));
7325   // float4 xf = vcvt_f32_s32(vmovl_u16(x));
7326   N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
7327   N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
7328   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
7329   SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
7330 
7331   // Use reciprocal estimate and two refinement steps.
7332   // float4 recip = vrecpeq_f32(yf);
7333   // recip *= vrecpsq_f32(yf, recip);
7334   // recip *= vrecpsq_f32(yf, recip);
7335   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7336                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7337                    BN1);
7338   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7339                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7340                    BN1, N2);
7341   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7342   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7343                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7344                    BN1, N2);
7345   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7346   // Simply multiplying by the reciprocal estimate can leave us a few ulps
7347   // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
7348   // and that it will never cause us to return an answer too large).
7349   // float4 result = as_float4(as_int4(xf*recip) + 2);
7350   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
7351   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
7352   N1 = DAG.getConstant(2, dl, MVT::v4i32);
7353   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
7354   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
7355   // Convert back to integer and return.
7356   // return vmovn_u32(vcvt_s32_f32(result));
7357   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
7358   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
7359   return N0;
7360 }
7361 
7362 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
7363   EVT VT = Op.getNode()->getValueType(0);
7364   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
7365 
7366   unsigned Opc;
7367   bool ExtraOp = false;
7368   switch (Op.getOpcode()) {
7369   default: llvm_unreachable("Invalid code");
7370   case ISD::ADDC: Opc = ARMISD::ADDC; break;
7371   case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
7372   case ISD::SUBC: Opc = ARMISD::SUBC; break;
7373   case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
7374   }
7375 
7376   if (!ExtraOp)
7377     return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
7378                        Op.getOperand(1));
7379   return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
7380                      Op.getOperand(1), Op.getOperand(2));
7381 }
7382 
7383 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
7384   assert(Subtarget->isTargetDarwin());
7385 
7386   // For iOS, we want to call an alternative entry point: __sincos_stret,
7387   // return values are passed via sret.
7388   SDLoc dl(Op);
7389   SDValue Arg = Op.getOperand(0);
7390   EVT ArgVT = Arg.getValueType();
7391   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
7392   auto PtrVT = getPointerTy(DAG.getDataLayout());
7393 
7394   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7395   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7396 
7397   // Pair of floats / doubles used to pass the result.
7398   Type *RetTy = StructType::get(ArgTy, ArgTy);
7399   auto &DL = DAG.getDataLayout();
7400 
7401   ArgListTy Args;
7402   bool ShouldUseSRet = Subtarget->isAPCS_ABI();
7403   SDValue SRet;
7404   if (ShouldUseSRet) {
7405     // Create stack object for sret.
7406     const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
7407     const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy);
7408     int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
7409     SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));
7410 
7411     ArgListEntry Entry;
7412     Entry.Node = SRet;
7413     Entry.Ty = RetTy->getPointerTo();
7414     Entry.IsSExt = false;
7415     Entry.IsZExt = false;
7416     Entry.IsSRet = true;
7417     Args.push_back(Entry);
7418     RetTy = Type::getVoidTy(*DAG.getContext());
7419   }
7420 
7421   ArgListEntry Entry;
7422   Entry.Node = Arg;
7423   Entry.Ty = ArgTy;
7424   Entry.IsSExt = false;
7425   Entry.IsZExt = false;
7426   Args.push_back(Entry);
7427 
7428   const char *LibcallName =
7429       (ArgVT == MVT::f64) ? "__sincos_stret" : "__sincosf_stret";
7430   RTLIB::Libcall LC =
7431       (ArgVT == MVT::f64) ? RTLIB::SINCOS_F64 : RTLIB::SINCOS_F32;
7432   CallingConv::ID CC = getLibcallCallingConv(LC);
7433   SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));
7434 
7435   TargetLowering::CallLoweringInfo CLI(DAG);
7436   CLI.setDebugLoc(dl)
7437       .setChain(DAG.getEntryNode())
7438       .setCallee(CC, RetTy, Callee, std::move(Args))
7439       .setDiscardResult(ShouldUseSRet);
7440   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
7441 
7442   if (!ShouldUseSRet)
7443     return CallResult.first;
7444 
7445   SDValue LoadSin =
7446       DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());
7447 
7448   // Address of cos field.
7449   SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
7450                             DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
7451   SDValue LoadCos =
7452       DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());
7453 
7454   SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
7455   return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
7456                      LoadSin.getValue(0), LoadCos.getValue(0));
7457 }
7458 
7459 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
7460                                                   bool Signed,
7461                                                   SDValue &Chain) const {
7462   EVT VT = Op.getValueType();
7463   assert((VT == MVT::i32 || VT == MVT::i64) &&
7464          "unexpected type for custom lowering DIV");
7465   SDLoc dl(Op);
7466 
7467   const auto &DL = DAG.getDataLayout();
7468   const auto &TLI = DAG.getTargetLoweringInfo();
7469 
7470   const char *Name = nullptr;
7471   if (Signed)
7472     Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
7473   else
7474     Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";
7475 
7476   SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));
7477 
7478   ARMTargetLowering::ArgListTy Args;
7479 
7480   for (auto AI : {1, 0}) {
7481     ArgListEntry Arg;
7482     Arg.Node = Op.getOperand(AI);
7483     Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
7484     Args.push_back(Arg);
7485   }
7486 
7487   CallLoweringInfo CLI(DAG);
7488   CLI.setDebugLoc(dl)
7489     .setChain(Chain)
7490     .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
7491                ES, std::move(Args));
7492 
7493   return LowerCallTo(CLI).first;
7494 }
7495 
7496 SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
7497                                             bool Signed) const {
7498   assert(Op.getValueType() == MVT::i32 &&
7499          "unexpected type for custom lowering DIV");
7500   SDLoc dl(Op);
7501 
7502   SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
7503                                DAG.getEntryNode(), Op.getOperand(1));
7504 
7505   return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7506 }
7507 
7508 static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) {
7509   SDLoc DL(N);
7510   SDValue Op = N->getOperand(1);
7511   if (N->getValueType(0) == MVT::i32)
7512     return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
7513   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
7514                            DAG.getConstant(0, DL, MVT::i32));
7515   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
7516                            DAG.getConstant(1, DL, MVT::i32));
7517   return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
7518                      DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
7519 }
7520 
7521 void ARMTargetLowering::ExpandDIV_Windows(
7522     SDValue Op, SelectionDAG &DAG, bool Signed,
7523     SmallVectorImpl<SDValue> &Results) const {
7524   const auto &DL = DAG.getDataLayout();
7525   const auto &TLI = DAG.getTargetLoweringInfo();
7526 
7527   assert(Op.getValueType() == MVT::i64 &&
7528          "unexpected type for custom lowering DIV");
7529   SDLoc dl(Op);
7530 
7531   SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());
7532 
7533   SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7534 
7535   SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
7536   SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
7537                               DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
7538   Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);
7539 
7540   Results.push_back(Lower);
7541   Results.push_back(Upper);
7542 }
7543 
7544 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
7545   if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
7546     // Acquire/Release load/store is not legal for targets without a dmb or
7547     // equivalent available.
7548     return SDValue();
7549 
7550   // Monotonic load/store is legal for all targets.
7551   return Op;
7552 }
7553 
7554 static void ReplaceREADCYCLECOUNTER(SDNode *N,
7555                                     SmallVectorImpl<SDValue> &Results,
7556                                     SelectionDAG &DAG,
7557                                     const ARMSubtarget *Subtarget) {
7558   SDLoc DL(N);
7559   // Under Power Management extensions, the cycle-count is:
7560   //    mrc p15, #0, <Rt>, c9, c13, #0
7561   SDValue Ops[] = { N->getOperand(0), // Chain
7562                     DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
7563                     DAG.getConstant(15, DL, MVT::i32),
7564                     DAG.getConstant(0, DL, MVT::i32),
7565                     DAG.getConstant(9, DL, MVT::i32),
7566                     DAG.getConstant(13, DL, MVT::i32),
7567                     DAG.getConstant(0, DL, MVT::i32)
7568   };
7569 
7570   SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
7571                                  DAG.getVTList(MVT::i32, MVT::Other), Ops);
7572   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
7573                                 DAG.getConstant(0, DL, MVT::i32)));
7574   Results.push_back(Cycles32.getValue(1));
7575 }
7576 
7577 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
7578   SDLoc dl(V.getNode());
7579   SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
7580   SDValue VHi = DAG.getAnyExtOrTrunc(
7581       DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
7582       dl, MVT::i32);
7583   SDValue RegClass =
7584       DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
7585   SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
7586   SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
7587   const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
7588   return SDValue(
7589       DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
7590 }
7591 
7592 static void ReplaceCMP_SWAP_64Results(SDNode *N,
7593                                        SmallVectorImpl<SDValue> & Results,
7594                                        SelectionDAG &DAG) {
7595   assert(N->getValueType(0) == MVT::i64 &&
7596          "AtomicCmpSwap on types less than 64 should be legal");
7597   SDValue Ops[] = {N->getOperand(1),
7598                    createGPRPairNode(DAG, N->getOperand(2)),
7599                    createGPRPairNode(DAG, N->getOperand(3)),
7600                    N->getOperand(0)};
7601   SDNode *CmpSwap = DAG.getMachineNode(
7602       ARM::CMP_SWAP_64, SDLoc(N),
7603       DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);
7604 
7605   MachineFunction &MF = DAG.getMachineFunction();
7606   MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1);
7607   MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
7608   cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
7609 
7610   Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_0, SDLoc(N), MVT::i32,
7611                                                SDValue(CmpSwap, 0)));
7612   Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_1, SDLoc(N), MVT::i32,
7613                                                SDValue(CmpSwap, 0)));
7614   Results.push_back(SDValue(CmpSwap, 2));
7615 }
7616 
7617 static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget,
7618                           SelectionDAG &DAG) {
7619   const auto &TLI = DAG.getTargetLoweringInfo();
7620 
7621   assert(Subtarget.getTargetTriple().isOSMSVCRT() &&
7622          "Custom lowering is MSVCRT specific!");
7623 
7624   SDLoc dl(Op);
7625   SDValue Val = Op.getOperand(0);
7626   MVT Ty = Val->getSimpleValueType(0);
7627   SDValue Exponent = DAG.getNode(ISD::SINT_TO_FP, dl, Ty, Op.getOperand(1));
7628   SDValue Callee = DAG.getExternalSymbol(Ty == MVT::f32 ? "powf" : "pow",
7629                                          TLI.getPointerTy(DAG.getDataLayout()));
7630 
7631   TargetLowering::ArgListTy Args;
7632   TargetLowering::ArgListEntry Entry;
7633 
7634   Entry.Node = Val;
7635   Entry.Ty = Val.getValueType().getTypeForEVT(*DAG.getContext());
7636   Entry.IsZExt = true;
7637   Args.push_back(Entry);
7638 
7639   Entry.Node = Exponent;
7640   Entry.Ty = Exponent.getValueType().getTypeForEVT(*DAG.getContext());
7641   Entry.IsZExt = true;
7642   Args.push_back(Entry);
7643 
7644   Type *LCRTy = Val.getValueType().getTypeForEVT(*DAG.getContext());
7645 
7646   // In the in-chain to the call is the entry node  If we are emitting a
7647   // tailcall, the chain will be mutated if the node has a non-entry input
7648   // chain.
7649   SDValue InChain = DAG.getEntryNode();
7650   SDValue TCChain = InChain;
7651 
7652   const auto *F = DAG.getMachineFunction().getFunction();
7653   bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
7654               F->getReturnType() == LCRTy;
7655   if (IsTC)
7656     InChain = TCChain;
7657 
7658   TargetLowering::CallLoweringInfo CLI(DAG);
7659   CLI.setDebugLoc(dl)
7660       .setChain(InChain)
7661       .setCallee(CallingConv::ARM_AAPCS_VFP, LCRTy, Callee, std::move(Args))
7662       .setTailCall(IsTC);
7663   std::pair<SDValue, SDValue> CI = TLI.LowerCallTo(CLI);
7664 
7665   // Return the chain (the DAG root) if it is a tail call
7666   return !CI.second.getNode() ? DAG.getRoot() : CI.first;
7667 }
7668 
7669 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
7670   switch (Op.getOpcode()) {
7671   default: llvm_unreachable("Don't know how to custom lower this!");
7672   case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
7673   case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
7674   case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
7675   case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
7676   case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
7677   case ISD::SELECT:        return LowerSELECT(Op, DAG);
7678   case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
7679   case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
7680   case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
7681   case ISD::VASTART:       return LowerVASTART(Op, DAG);
7682   case ISD::ATOMIC_FENCE:  return LowerATOMIC_FENCE(Op, DAG, Subtarget);
7683   case ISD::PREFETCH:      return LowerPREFETCH(Op, DAG, Subtarget);
7684   case ISD::SINT_TO_FP:
7685   case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
7686   case ISD::FP_TO_SINT:
7687   case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
7688   case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
7689   case ISD::RETURNADDR:    return LowerRETURNADDR(Op, DAG);
7690   case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
7691   case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
7692   case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
7693   case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
7694   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
7695                                                                Subtarget);
7696   case ISD::BITCAST:       return ExpandBITCAST(Op.getNode(), DAG);
7697   case ISD::SHL:
7698   case ISD::SRL:
7699   case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
7700   case ISD::SREM:          return LowerREM(Op.getNode(), DAG);
7701   case ISD::UREM:          return LowerREM(Op.getNode(), DAG);
7702   case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
7703   case ISD::SRL_PARTS:
7704   case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
7705   case ISD::CTTZ:
7706   case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
7707   case ISD::CTPOP:         return LowerCTPOP(Op.getNode(), DAG, Subtarget);
7708   case ISD::SETCC:         return LowerVSETCC(Op, DAG);
7709   case ISD::SETCCE:        return LowerSETCCE(Op, DAG);
7710   case ISD::ConstantFP:    return LowerConstantFP(Op, DAG, Subtarget);
7711   case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG, Subtarget);
7712   case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
7713   case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
7714   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
7715   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
7716   case ISD::FLT_ROUNDS_:   return LowerFLT_ROUNDS_(Op, DAG);
7717   case ISD::MUL:           return LowerMUL(Op, DAG);
7718   case ISD::SDIV:
7719     if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
7720       return LowerDIV_Windows(Op, DAG, /* Signed */ true);
7721     return LowerSDIV(Op, DAG);
7722   case ISD::UDIV:
7723     if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
7724       return LowerDIV_Windows(Op, DAG, /* Signed */ false);
7725     return LowerUDIV(Op, DAG);
7726   case ISD::ADDC:
7727   case ISD::ADDE:
7728   case ISD::SUBC:
7729   case ISD::SUBE:          return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
7730   case ISD::SADDO:
7731   case ISD::UADDO:
7732   case ISD::SSUBO:
7733   case ISD::USUBO:
7734     return LowerXALUO(Op, DAG);
7735   case ISD::ATOMIC_LOAD:
7736   case ISD::ATOMIC_STORE:  return LowerAtomicLoadStore(Op, DAG);
7737   case ISD::FSINCOS:       return LowerFSINCOS(Op, DAG);
7738   case ISD::SDIVREM:
7739   case ISD::UDIVREM:       return LowerDivRem(Op, DAG);
7740   case ISD::DYNAMIC_STACKALLOC:
7741     if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
7742       return LowerDYNAMIC_STACKALLOC(Op, DAG);
7743     llvm_unreachable("Don't know how to custom lower this!");
7744   case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
7745   case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
7746   case ISD::FPOWI: return LowerFPOWI(Op, *Subtarget, DAG);
7747   case ARMISD::WIN__DBZCHK: return SDValue();
7748   }
7749 }
7750 
7751 static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
7752                                  SelectionDAG &DAG) {
7753   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
7754   unsigned Opc = 0;
7755   if (IntNo == Intrinsic::arm_smlald)
7756     Opc = ARMISD::SMLALD;
7757   else if (IntNo == Intrinsic::arm_smlaldx)
7758     Opc = ARMISD::SMLALDX;
7759   else if (IntNo == Intrinsic::arm_smlsld)
7760     Opc = ARMISD::SMLSLD;
7761   else if (IntNo == Intrinsic::arm_smlsldx)
7762     Opc = ARMISD::SMLSLDX;
7763   else
7764     return;
7765 
7766   SDLoc dl(N);
7767   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
7768                            N->getOperand(3),
7769                            DAG.getConstant(0, dl, MVT::i32));
7770   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
7771                            N->getOperand(3),
7772                            DAG.getConstant(1, dl, MVT::i32));
7773 
7774   SDValue LongMul = DAG.getNode(Opc, dl,
7775                                 DAG.getVTList(MVT::i32, MVT::i32),
7776                                 N->getOperand(1), N->getOperand(2),
7777                                 Lo, Hi);
7778   Results.push_back(LongMul.getValue(0));
7779   Results.push_back(LongMul.getValue(1));
7780 }
7781 
7782 /// ReplaceNodeResults - Replace the results of node with an illegal result
7783 /// type with new values built out of custom code.
7784 void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
7785                                            SmallVectorImpl<SDValue> &Results,
7786                                            SelectionDAG &DAG) const {
7787   SDValue Res;
7788   switch (N->getOpcode()) {
7789   default:
7790     llvm_unreachable("Don't know how to custom expand this!");
7791   case ISD::READ_REGISTER:
7792     ExpandREAD_REGISTER(N, Results, DAG);
7793     break;
7794   case ISD::BITCAST:
7795     Res = ExpandBITCAST(N, DAG);
7796     break;
7797   case ISD::SRL:
7798   case ISD::SRA:
7799     Res = Expand64BitShift(N, DAG, Subtarget);
7800     break;
7801   case ISD::SREM:
7802   case ISD::UREM:
7803     Res = LowerREM(N, DAG);
7804     break;
7805   case ISD::SDIVREM:
7806   case ISD::UDIVREM:
7807     Res = LowerDivRem(SDValue(N, 0), DAG);
7808     assert(Res.getNumOperands() == 2 && "DivRem needs two values");
7809     Results.push_back(Res.getValue(0));
7810     Results.push_back(Res.getValue(1));
7811     return;
7812   case ISD::READCYCLECOUNTER:
7813     ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
7814     return;
7815   case ISD::UDIV:
7816   case ISD::SDIV:
7817     assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
7818     return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
7819                              Results);
7820   case ISD::ATOMIC_CMP_SWAP:
7821     ReplaceCMP_SWAP_64Results(N, Results, DAG);
7822     return;
7823   case ISD::INTRINSIC_WO_CHAIN:
7824     return ReplaceLongIntrinsic(N, Results, DAG);
7825   }
7826   if (Res.getNode())
7827     Results.push_back(Res);
7828 }
7829 
7830 //===----------------------------------------------------------------------===//
7831 //                           ARM Scheduler Hooks
7832 //===----------------------------------------------------------------------===//
7833 
7834 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
7835 /// registers the function context.
7836 void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
7837                                                MachineBasicBlock *MBB,
7838                                                MachineBasicBlock *DispatchBB,
7839                                                int FI) const {
7840   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
7841          "ROPI/RWPI not currently supported with SjLj");
7842   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
7843   DebugLoc dl = MI.getDebugLoc();
7844   MachineFunction *MF = MBB->getParent();
7845   MachineRegisterInfo *MRI = &MF->getRegInfo();
7846   MachineConstantPool *MCP = MF->getConstantPool();
7847   ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
7848   const Function *F = MF->getFunction();
7849 
7850   bool isThumb = Subtarget->isThumb();
7851   bool isThumb2 = Subtarget->isThumb2();
7852 
7853   unsigned PCLabelId = AFI->createPICLabelUId();
7854   unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
7855   ARMConstantPoolValue *CPV =
7856     ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj);
7857   unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);
7858 
7859   const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
7860                                            : &ARM::GPRRegClass;
7861 
7862   // Grab constant pool and fixed stack memory operands.
7863   MachineMemOperand *CPMMO =
7864       MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
7865                                MachineMemOperand::MOLoad, 4, 4);
7866 
7867   MachineMemOperand *FIMMOSt =
7868       MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
7869                                MachineMemOperand::MOStore, 4, 4);
7870 
7871   // Load the address of the dispatch MBB into the jump buffer.
7872   if (isThumb2) {
7873     // Incoming value: jbuf
7874     //   ldr.n  r5, LCPI1_1
7875     //   orr    r5, r5, #1
7876     //   add    r5, pc
7877     //   str    r5, [$jbuf, #+4] ; &jbuf[1]
7878     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7879     BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
7880         .addConstantPoolIndex(CPI)
7881         .addMemOperand(CPMMO)
7882         .add(predOps(ARMCC::AL));
7883     // Set the low bit because of thumb mode.
7884     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7885     BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
7886         .addReg(NewVReg1, RegState::Kill)
7887         .addImm(0x01)
7888         .add(predOps(ARMCC::AL))
7889         .add(condCodeOp());
7890     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
7891     BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
7892       .addReg(NewVReg2, RegState::Kill)
7893       .addImm(PCLabelId);
7894     BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
7895         .addReg(NewVReg3, RegState::Kill)
7896         .addFrameIndex(FI)
7897         .addImm(36) // &jbuf[1] :: pc
7898         .addMemOperand(FIMMOSt)
7899         .add(predOps(ARMCC::AL));
7900   } else if (isThumb) {
7901     // Incoming value: jbuf
7902     //   ldr.n  r1, LCPI1_4
7903     //   add    r1, pc
7904     //   mov    r2, #1
7905     //   orrs   r1, r2
7906     //   add    r2, $jbuf, #+4 ; &jbuf[1]
7907     //   str    r1, [r2]
7908     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7909     BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
7910         .addConstantPoolIndex(CPI)
7911         .addMemOperand(CPMMO)
7912         .add(predOps(ARMCC::AL));
7913     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7914     BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
7915       .addReg(NewVReg1, RegState::Kill)
7916       .addImm(PCLabelId);
7917     // Set the low bit because of thumb mode.
7918     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
7919     BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
7920         .addReg(ARM::CPSR, RegState::Define)
7921         .addImm(1)
7922         .add(predOps(ARMCC::AL));
7923     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
7924     BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
7925         .addReg(ARM::CPSR, RegState::Define)
7926         .addReg(NewVReg2, RegState::Kill)
7927         .addReg(NewVReg3, RegState::Kill)
7928         .add(predOps(ARMCC::AL));
7929     unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
7930     BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
7931             .addFrameIndex(FI)
7932             .addImm(36); // &jbuf[1] :: pc
7933     BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
7934         .addReg(NewVReg4, RegState::Kill)
7935         .addReg(NewVReg5, RegState::Kill)
7936         .addImm(0)
7937         .addMemOperand(FIMMOSt)
7938         .add(predOps(ARMCC::AL));
7939   } else {
7940     // Incoming value: jbuf
7941     //   ldr  r1, LCPI1_1
7942     //   add  r1, pc, r1
7943     //   str  r1, [$jbuf, #+4] ; &jbuf[1]
7944     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7945     BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
7946         .addConstantPoolIndex(CPI)
7947         .addImm(0)
7948         .addMemOperand(CPMMO)
7949         .add(predOps(ARMCC::AL));
7950     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7951     BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
7952         .addReg(NewVReg1, RegState::Kill)
7953         .addImm(PCLabelId)
7954         .add(predOps(ARMCC::AL));
7955     BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
7956         .addReg(NewVReg2, RegState::Kill)
7957         .addFrameIndex(FI)
7958         .addImm(36) // &jbuf[1] :: pc
7959         .addMemOperand(FIMMOSt)
7960         .add(predOps(ARMCC::AL));
7961   }
7962 }
7963 
7964 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
7965                                               MachineBasicBlock *MBB) const {
7966   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
7967   DebugLoc dl = MI.getDebugLoc();
7968   MachineFunction *MF = MBB->getParent();
7969   MachineRegisterInfo *MRI = &MF->getRegInfo();
7970   MachineFrameInfo &MFI = MF->getFrameInfo();
7971   int FI = MFI.getFunctionContextIndex();
7972 
7973   const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
7974                                                         : &ARM::GPRnopcRegClass;
7975 
7976   // Get a mapping of the call site numbers to all of the landing pads they're
7977   // associated with.
7978   DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad;
7979   unsigned MaxCSNum = 0;
7980   for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
7981        ++BB) {
7982     if (!BB->isEHPad()) continue;
7983 
7984     // FIXME: We should assert that the EH_LABEL is the first MI in the landing
7985     // pad.
7986     for (MachineBasicBlock::iterator
7987            II = BB->begin(), IE = BB->end(); II != IE; ++II) {
7988       if (!II->isEHLabel()) continue;
7989 
7990       MCSymbol *Sym = II->getOperand(0).getMCSymbol();
7991       if (!MF->hasCallSiteLandingPad(Sym)) continue;
7992 
7993       SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
7994       for (SmallVectorImpl<unsigned>::iterator
7995              CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
7996            CSI != CSE; ++CSI) {
7997         CallSiteNumToLPad[*CSI].push_back(&*BB);
7998         MaxCSNum = std::max(MaxCSNum, *CSI);
7999       }
8000       break;
8001     }
8002   }
8003 
8004   // Get an ordered list of the machine basic blocks for the jump table.
8005   std::vector<MachineBasicBlock*> LPadList;
8006   SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
8007   LPadList.reserve(CallSiteNumToLPad.size());
8008   for (unsigned I = 1; I <= MaxCSNum; ++I) {
8009     SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
8010     for (SmallVectorImpl<MachineBasicBlock*>::iterator
8011            II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
8012       LPadList.push_back(*II);
8013       InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
8014     }
8015   }
8016 
8017   assert(!LPadList.empty() &&
8018          "No landing pad destinations for the dispatch jump table!");
8019 
8020   // Create the jump table and associated information.
8021   MachineJumpTableInfo *JTI =
8022     MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
8023   unsigned MJTI = JTI->createJumpTableIndex(LPadList);
8024 
8025   // Create the MBBs for the dispatch code.
8026 
8027   // Shove the dispatch's address into the return slot in the function context.
8028   MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
8029   DispatchBB->setIsEHPad();
8030 
8031   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
8032   unsigned trap_opcode;
8033   if (Subtarget->isThumb())
8034     trap_opcode = ARM::tTRAP;
8035   else
8036     trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
8037 
8038   BuildMI(TrapBB, dl, TII->get(trap_opcode));
8039   DispatchBB->addSuccessor(TrapBB);
8040 
8041   MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
8042   DispatchBB->addSuccessor(DispContBB);
8043 
8044   // Insert and MBBs.
8045   MF->insert(MF->end(), DispatchBB);
8046   MF->insert(MF->end(), DispContBB);
8047   MF->insert(MF->end(), TrapBB);
8048 
8049   // Insert code into the entry block that creates and registers the function
8050   // context.
8051   SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
8052 
8053   MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
8054       MachinePointerInfo::getFixedStack(*MF, FI),
8055       MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4);
8056 
8057   MachineInstrBuilder MIB;
8058   MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));
8059 
8060   const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
8061   const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
8062 
8063   // Add a register mask with no preserved registers.  This results in all
8064   // registers being marked as clobbered. This can't work if the dispatch block
8065   // is in a Thumb1 function and is linked with ARM code which uses the FP
8066   // registers, as there is no way to preserve the FP registers in Thumb1 mode.
8067   MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));
8068 
8069   bool IsPositionIndependent = isPositionIndependent();
8070   unsigned NumLPads = LPadList.size();
8071   if (Subtarget->isThumb2()) {
8072     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8073     BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
8074         .addFrameIndex(FI)
8075         .addImm(4)
8076         .addMemOperand(FIMMOLd)
8077         .add(predOps(ARMCC::AL));
8078 
8079     if (NumLPads < 256) {
8080       BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
8081           .addReg(NewVReg1)
8082           .addImm(LPadList.size())
8083           .add(predOps(ARMCC::AL));
8084     } else {
8085       unsigned VReg1 = MRI->createVirtualRegister(TRC);
8086       BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
8087           .addImm(NumLPads & 0xFFFF)
8088           .add(predOps(ARMCC::AL));
8089 
8090       unsigned VReg2 = VReg1;
8091       if ((NumLPads & 0xFFFF0000) != 0) {
8092         VReg2 = MRI->createVirtualRegister(TRC);
8093         BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
8094             .addReg(VReg1)
8095             .addImm(NumLPads >> 16)
8096             .add(predOps(ARMCC::AL));
8097       }
8098 
8099       BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
8100           .addReg(NewVReg1)
8101           .addReg(VReg2)
8102           .add(predOps(ARMCC::AL));
8103     }
8104 
8105     BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
8106       .addMBB(TrapBB)
8107       .addImm(ARMCC::HI)
8108       .addReg(ARM::CPSR);
8109 
8110     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8111     BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3)
8112         .addJumpTableIndex(MJTI)
8113         .add(predOps(ARMCC::AL));
8114 
8115     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8116     BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
8117         .addReg(NewVReg3, RegState::Kill)
8118         .addReg(NewVReg1)
8119         .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
8120         .add(predOps(ARMCC::AL))
8121         .add(condCodeOp());
8122 
8123     BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
8124       .addReg(NewVReg4, RegState::Kill)
8125       .addReg(NewVReg1)
8126       .addJumpTableIndex(MJTI);
8127   } else if (Subtarget->isThumb()) {
8128     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8129     BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
8130         .addFrameIndex(FI)
8131         .addImm(1)
8132         .addMemOperand(FIMMOLd)
8133         .add(predOps(ARMCC::AL));
8134 
8135     if (NumLPads < 256) {
8136       BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
8137           .addReg(NewVReg1)
8138           .addImm(NumLPads)
8139           .add(predOps(ARMCC::AL));
8140     } else {
8141       MachineConstantPool *ConstantPool = MF->getConstantPool();
8142       Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8143       const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
8144 
8145       // MachineConstantPool wants an explicit alignment.
8146       unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8147       if (Align == 0)
8148         Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8149       unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8150 
8151       unsigned VReg1 = MRI->createVirtualRegister(TRC);
8152       BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
8153           .addReg(VReg1, RegState::Define)
8154           .addConstantPoolIndex(Idx)
8155           .add(predOps(ARMCC::AL));
8156       BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
8157           .addReg(NewVReg1)
8158           .addReg(VReg1)
8159           .add(predOps(ARMCC::AL));
8160     }
8161 
8162     BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
8163       .addMBB(TrapBB)
8164       .addImm(ARMCC::HI)
8165       .addReg(ARM::CPSR);
8166 
8167     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
8168     BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
8169         .addReg(ARM::CPSR, RegState::Define)
8170         .addReg(NewVReg1)
8171         .addImm(2)
8172         .add(predOps(ARMCC::AL));
8173 
8174     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8175     BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
8176         .addJumpTableIndex(MJTI)
8177         .add(predOps(ARMCC::AL));
8178 
8179     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8180     BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
8181         .addReg(ARM::CPSR, RegState::Define)
8182         .addReg(NewVReg2, RegState::Kill)
8183         .addReg(NewVReg3)
8184         .add(predOps(ARMCC::AL));
8185 
8186     MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
8187         MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
8188 
8189     unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
8190     BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
8191         .addReg(NewVReg4, RegState::Kill)
8192         .addImm(0)
8193         .addMemOperand(JTMMOLd)
8194         .add(predOps(ARMCC::AL));
8195 
8196     unsigned NewVReg6 = NewVReg5;
8197     if (IsPositionIndependent) {
8198       NewVReg6 = MRI->createVirtualRegister(TRC);
8199       BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
8200           .addReg(ARM::CPSR, RegState::Define)
8201           .addReg(NewVReg5, RegState::Kill)
8202           .addReg(NewVReg3)
8203           .add(predOps(ARMCC::AL));
8204     }
8205 
8206     BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
8207       .addReg(NewVReg6, RegState::Kill)
8208       .addJumpTableIndex(MJTI);
8209   } else {
8210     unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8211     BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
8212         .addFrameIndex(FI)
8213         .addImm(4)
8214         .addMemOperand(FIMMOLd)
8215         .add(predOps(ARMCC::AL));
8216 
8217     if (NumLPads < 256) {
8218       BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
8219           .addReg(NewVReg1)
8220           .addImm(NumLPads)
8221           .add(predOps(ARMCC::AL));
8222     } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
8223       unsigned VReg1 = MRI->createVirtualRegister(TRC);
8224       BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
8225           .addImm(NumLPads & 0xFFFF)
8226           .add(predOps(ARMCC::AL));
8227 
8228       unsigned VReg2 = VReg1;
8229       if ((NumLPads & 0xFFFF0000) != 0) {
8230         VReg2 = MRI->createVirtualRegister(TRC);
8231         BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
8232             .addReg(VReg1)
8233             .addImm(NumLPads >> 16)
8234             .add(predOps(ARMCC::AL));
8235       }
8236 
8237       BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
8238           .addReg(NewVReg1)
8239           .addReg(VReg2)
8240           .add(predOps(ARMCC::AL));
8241     } else {
8242       MachineConstantPool *ConstantPool = MF->getConstantPool();
8243       Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8244       const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
8245 
8246       // MachineConstantPool wants an explicit alignment.
8247       unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8248       if (Align == 0)
8249         Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8250       unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8251 
8252       unsigned VReg1 = MRI->createVirtualRegister(TRC);
8253       BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
8254           .addReg(VReg1, RegState::Define)
8255           .addConstantPoolIndex(Idx)
8256           .addImm(0)
8257           .add(predOps(ARMCC::AL));
8258       BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
8259           .addReg(NewVReg1)
8260           .addReg(VReg1, RegState::Kill)
8261           .add(predOps(ARMCC::AL));
8262     }
8263 
8264     BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
8265       .addMBB(TrapBB)
8266       .addImm(ARMCC::HI)
8267       .addReg(ARM::CPSR);
8268 
8269     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8270     BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
8271         .addReg(NewVReg1)
8272         .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
8273         .add(predOps(ARMCC::AL))
8274         .add(condCodeOp());
8275     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8276     BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
8277         .addJumpTableIndex(MJTI)
8278         .add(predOps(ARMCC::AL));
8279 
8280     MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
8281         MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
8282     unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
8283     BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
8284         .addReg(NewVReg3, RegState::Kill)
8285         .addReg(NewVReg4)
8286         .addImm(0)
8287         .addMemOperand(JTMMOLd)
8288         .add(predOps(ARMCC::AL));
8289 
8290     if (IsPositionIndependent) {
8291       BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
8292         .addReg(NewVReg5, RegState::Kill)
8293         .addReg(NewVReg4)
8294         .addJumpTableIndex(MJTI);
8295     } else {
8296       BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
8297         .addReg(NewVReg5, RegState::Kill)
8298         .addJumpTableIndex(MJTI);
8299     }
8300   }
8301 
8302   // Add the jump table entries as successors to the MBB.
8303   SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
8304   for (std::vector<MachineBasicBlock*>::iterator
8305          I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
8306     MachineBasicBlock *CurMBB = *I;
8307     if (SeenMBBs.insert(CurMBB).second)
8308       DispContBB->addSuccessor(CurMBB);
8309   }
8310 
8311   // N.B. the order the invoke BBs are processed in doesn't matter here.
8312   const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
8313   SmallVector<MachineBasicBlock*, 64> MBBLPads;
8314   for (MachineBasicBlock *BB : InvokeBBs) {
8315 
8316     // Remove the landing pad successor from the invoke block and replace it
8317     // with the new dispatch block.
8318     SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
8319                                                   BB->succ_end());
8320     while (!Successors.empty()) {
8321       MachineBasicBlock *SMBB = Successors.pop_back_val();
8322       if (SMBB->isEHPad()) {
8323         BB->removeSuccessor(SMBB);
8324         MBBLPads.push_back(SMBB);
8325       }
8326     }
8327 
8328     BB->addSuccessor(DispatchBB, BranchProbability::getZero());
8329     BB->normalizeSuccProbs();
8330 
8331     // Find the invoke call and mark all of the callee-saved registers as
8332     // 'implicit defined' so that they're spilled. This prevents code from
8333     // moving instructions to before the EH block, where they will never be
8334     // executed.
8335     for (MachineBasicBlock::reverse_iterator
8336            II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
8337       if (!II->isCall()) continue;
8338 
8339       DenseMap<unsigned, bool> DefRegs;
8340       for (MachineInstr::mop_iterator
8341              OI = II->operands_begin(), OE = II->operands_end();
8342            OI != OE; ++OI) {
8343         if (!OI->isReg()) continue;
8344         DefRegs[OI->getReg()] = true;
8345       }
8346 
8347       MachineInstrBuilder MIB(*MF, &*II);
8348 
8349       for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
8350         unsigned Reg = SavedRegs[i];
8351         if (Subtarget->isThumb2() &&
8352             !ARM::tGPRRegClass.contains(Reg) &&
8353             !ARM::hGPRRegClass.contains(Reg))
8354           continue;
8355         if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
8356           continue;
8357         if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
8358           continue;
8359         if (!DefRegs[Reg])
8360           MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
8361       }
8362 
8363       break;
8364     }
8365   }
8366 
8367   // Mark all former landing pads as non-landing pads. The dispatch is the only
8368   // landing pad now.
8369   for (SmallVectorImpl<MachineBasicBlock*>::iterator
8370          I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
8371     (*I)->setIsEHPad(false);
8372 
8373   // The instruction is gone now.
8374   MI.eraseFromParent();
8375 }
8376 
8377 static
8378 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
8379   for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
8380        E = MBB->succ_end(); I != E; ++I)
8381     if (*I != Succ)
8382       return *I;
8383   llvm_unreachable("Expecting a BB with two successors!");
8384 }
8385 
8386 /// Return the load opcode for a given load size. If load size >= 8,
8387 /// neon opcode will be returned.
8388 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
8389   if (LdSize >= 8)
8390     return LdSize == 16 ? ARM::VLD1q32wb_fixed
8391                         : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
8392   if (IsThumb1)
8393     return LdSize == 4 ? ARM::tLDRi
8394                        : LdSize == 2 ? ARM::tLDRHi
8395                                      : LdSize == 1 ? ARM::tLDRBi : 0;
8396   if (IsThumb2)
8397     return LdSize == 4 ? ARM::t2LDR_POST
8398                        : LdSize == 2 ? ARM::t2LDRH_POST
8399                                      : LdSize == 1 ? ARM::t2LDRB_POST : 0;
8400   return LdSize == 4 ? ARM::LDR_POST_IMM
8401                      : LdSize == 2 ? ARM::LDRH_POST
8402                                    : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
8403 }
8404 
8405 /// Return the store opcode for a given store size. If store size >= 8,
8406 /// neon opcode will be returned.
8407 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
8408   if (StSize >= 8)
8409     return StSize == 16 ? ARM::VST1q32wb_fixed
8410                         : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
8411   if (IsThumb1)
8412     return StSize == 4 ? ARM::tSTRi
8413                        : StSize == 2 ? ARM::tSTRHi
8414                                      : StSize == 1 ? ARM::tSTRBi : 0;
8415   if (IsThumb2)
8416     return StSize == 4 ? ARM::t2STR_POST
8417                        : StSize == 2 ? ARM::t2STRH_POST
8418                                      : StSize == 1 ? ARM::t2STRB_POST : 0;
8419   return StSize == 4 ? ARM::STR_POST_IMM
8420                      : StSize == 2 ? ARM::STRH_POST
8421                                    : StSize == 1 ? ARM::STRB_POST_IMM : 0;
8422 }
8423 
8424 /// Emit a post-increment load operation with given size. The instructions
8425 /// will be added to BB at Pos.
8426 static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
8427                        const TargetInstrInfo *TII, const DebugLoc &dl,
8428                        unsigned LdSize, unsigned Data, unsigned AddrIn,
8429                        unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
8430   unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
8431   assert(LdOpc != 0 && "Should have a load opcode");
8432   if (LdSize >= 8) {
8433     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8434         .addReg(AddrOut, RegState::Define)
8435         .addReg(AddrIn)
8436         .addImm(0)
8437         .add(predOps(ARMCC::AL));
8438   } else if (IsThumb1) {
8439     // load + update AddrIn
8440     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8441         .addReg(AddrIn)
8442         .addImm(0)
8443         .add(predOps(ARMCC::AL));
8444     BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
8445         .add(t1CondCodeOp())
8446         .addReg(AddrIn)
8447         .addImm(LdSize)
8448         .add(predOps(ARMCC::AL));
8449   } else if (IsThumb2) {
8450     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8451         .addReg(AddrOut, RegState::Define)
8452         .addReg(AddrIn)
8453         .addImm(LdSize)
8454         .add(predOps(ARMCC::AL));
8455   } else { // arm
8456     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8457         .addReg(AddrOut, RegState::Define)
8458         .addReg(AddrIn)
8459         .addReg(0)
8460         .addImm(LdSize)
8461         .add(predOps(ARMCC::AL));
8462   }
8463 }
8464 
8465 /// Emit a post-increment store operation with given size. The instructions
8466 /// will be added to BB at Pos.
8467 static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
8468                        const TargetInstrInfo *TII, const DebugLoc &dl,
8469                        unsigned StSize, unsigned Data, unsigned AddrIn,
8470                        unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
8471   unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
8472   assert(StOpc != 0 && "Should have a store opcode");
8473   if (StSize >= 8) {
8474     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8475         .addReg(AddrIn)
8476         .addImm(0)
8477         .addReg(Data)
8478         .add(predOps(ARMCC::AL));
8479   } else if (IsThumb1) {
8480     // store + update AddrIn
8481     BuildMI(*BB, Pos, dl, TII->get(StOpc))
8482         .addReg(Data)
8483         .addReg(AddrIn)
8484         .addImm(0)
8485         .add(predOps(ARMCC::AL));
8486     BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
8487         .add(t1CondCodeOp())
8488         .addReg(AddrIn)
8489         .addImm(StSize)
8490         .add(predOps(ARMCC::AL));
8491   } else if (IsThumb2) {
8492     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8493         .addReg(Data)
8494         .addReg(AddrIn)
8495         .addImm(StSize)
8496         .add(predOps(ARMCC::AL));
8497   } else { // arm
8498     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8499         .addReg(Data)
8500         .addReg(AddrIn)
8501         .addReg(0)
8502         .addImm(StSize)
8503         .add(predOps(ARMCC::AL));
8504   }
8505 }
8506 
8507 MachineBasicBlock *
8508 ARMTargetLowering::EmitStructByval(MachineInstr &MI,
8509                                    MachineBasicBlock *BB) const {
8510   // This pseudo instruction has 3 operands: dst, src, size
8511   // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
8512   // Otherwise, we will generate unrolled scalar copies.
8513   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8514   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8515   MachineFunction::iterator It = ++BB->getIterator();
8516 
8517   unsigned dest = MI.getOperand(0).getReg();
8518   unsigned src = MI.getOperand(1).getReg();
8519   unsigned SizeVal = MI.getOperand(2).getImm();
8520   unsigned Align = MI.getOperand(3).getImm();
8521   DebugLoc dl = MI.getDebugLoc();
8522 
8523   MachineFunction *MF = BB->getParent();
8524   MachineRegisterInfo &MRI = MF->getRegInfo();
8525   unsigned UnitSize = 0;
8526   const TargetRegisterClass *TRC = nullptr;
8527   const TargetRegisterClass *VecTRC = nullptr;
8528 
8529   bool IsThumb1 = Subtarget->isThumb1Only();
8530   bool IsThumb2 = Subtarget->isThumb2();
8531   bool IsThumb = Subtarget->isThumb();
8532 
8533   if (Align & 1) {
8534     UnitSize = 1;
8535   } else if (Align & 2) {
8536     UnitSize = 2;
8537   } else {
8538     // Check whether we can use NEON instructions.
8539     if (!MF->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) &&
8540         Subtarget->hasNEON()) {
8541       if ((Align % 16 == 0) && SizeVal >= 16)
8542         UnitSize = 16;
8543       else if ((Align % 8 == 0) && SizeVal >= 8)
8544         UnitSize = 8;
8545     }
8546     // Can't use NEON instructions.
8547     if (UnitSize == 0)
8548       UnitSize = 4;
8549   }
8550 
8551   // Select the correct opcode and register class for unit size load/store
8552   bool IsNeon = UnitSize >= 8;
8553   TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
8554   if (IsNeon)
8555     VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
8556                             : UnitSize == 8 ? &ARM::DPRRegClass
8557                                             : nullptr;
8558 
8559   unsigned BytesLeft = SizeVal % UnitSize;
8560   unsigned LoopSize = SizeVal - BytesLeft;
8561 
8562   if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
8563     // Use LDR and STR to copy.
8564     // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
8565     // [destOut] = STR_POST(scratch, destIn, UnitSize)
8566     unsigned srcIn = src;
8567     unsigned destIn = dest;
8568     for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
8569       unsigned srcOut = MRI.createVirtualRegister(TRC);
8570       unsigned destOut = MRI.createVirtualRegister(TRC);
8571       unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
8572       emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
8573                  IsThumb1, IsThumb2);
8574       emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
8575                  IsThumb1, IsThumb2);
8576       srcIn = srcOut;
8577       destIn = destOut;
8578     }
8579 
8580     // Handle the leftover bytes with LDRB and STRB.
8581     // [scratch, srcOut] = LDRB_POST(srcIn, 1)
8582     // [destOut] = STRB_POST(scratch, destIn, 1)
8583     for (unsigned i = 0; i < BytesLeft; i++) {
8584       unsigned srcOut = MRI.createVirtualRegister(TRC);
8585       unsigned destOut = MRI.createVirtualRegister(TRC);
8586       unsigned scratch = MRI.createVirtualRegister(TRC);
8587       emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
8588                  IsThumb1, IsThumb2);
8589       emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
8590                  IsThumb1, IsThumb2);
8591       srcIn = srcOut;
8592       destIn = destOut;
8593     }
8594     MI.eraseFromParent(); // The instruction is gone now.
8595     return BB;
8596   }
8597 
8598   // Expand the pseudo op to a loop.
8599   // thisMBB:
8600   //   ...
8601   //   movw varEnd, # --> with thumb2
8602   //   movt varEnd, #
8603   //   ldrcp varEnd, idx --> without thumb2
8604   //   fallthrough --> loopMBB
8605   // loopMBB:
8606   //   PHI varPhi, varEnd, varLoop
8607   //   PHI srcPhi, src, srcLoop
8608   //   PHI destPhi, dst, destLoop
8609   //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
8610   //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
8611   //   subs varLoop, varPhi, #UnitSize
8612   //   bne loopMBB
8613   //   fallthrough --> exitMBB
8614   // exitMBB:
8615   //   epilogue to handle left-over bytes
8616   //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
8617   //   [destOut] = STRB_POST(scratch, destLoop, 1)
8618   MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
8619   MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
8620   MF->insert(It, loopMBB);
8621   MF->insert(It, exitMBB);
8622 
8623   // Transfer the remainder of BB and its successor edges to exitMBB.
8624   exitMBB->splice(exitMBB->begin(), BB,
8625                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
8626   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
8627 
8628   // Load an immediate to varEnd.
8629   unsigned varEnd = MRI.createVirtualRegister(TRC);
8630   if (Subtarget->useMovt(*MF)) {
8631     unsigned Vtmp = varEnd;
8632     if ((LoopSize & 0xFFFF0000) != 0)
8633       Vtmp = MRI.createVirtualRegister(TRC);
8634     BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
8635         .addImm(LoopSize & 0xFFFF)
8636         .add(predOps(ARMCC::AL));
8637 
8638     if ((LoopSize & 0xFFFF0000) != 0)
8639       BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
8640           .addReg(Vtmp)
8641           .addImm(LoopSize >> 16)
8642           .add(predOps(ARMCC::AL));
8643   } else {
8644     MachineConstantPool *ConstantPool = MF->getConstantPool();
8645     Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8646     const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
8647 
8648     // MachineConstantPool wants an explicit alignment.
8649     unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8650     if (Align == 0)
8651       Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8652     unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8653 
8654     if (IsThumb)
8655       BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
8656           .addReg(varEnd, RegState::Define)
8657           .addConstantPoolIndex(Idx)
8658           .add(predOps(ARMCC::AL));
8659     else
8660       BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
8661           .addReg(varEnd, RegState::Define)
8662           .addConstantPoolIndex(Idx)
8663           .addImm(0)
8664           .add(predOps(ARMCC::AL));
8665   }
8666   BB->addSuccessor(loopMBB);
8667 
8668   // Generate the loop body:
8669   //   varPhi = PHI(varLoop, varEnd)
8670   //   srcPhi = PHI(srcLoop, src)
8671   //   destPhi = PHI(destLoop, dst)
8672   MachineBasicBlock *entryBB = BB;
8673   BB = loopMBB;
8674   unsigned varLoop = MRI.createVirtualRegister(TRC);
8675   unsigned varPhi = MRI.createVirtualRegister(TRC);
8676   unsigned srcLoop = MRI.createVirtualRegister(TRC);
8677   unsigned srcPhi = MRI.createVirtualRegister(TRC);
8678   unsigned destLoop = MRI.createVirtualRegister(TRC);
8679   unsigned destPhi = MRI.createVirtualRegister(TRC);
8680 
8681   BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
8682     .addReg(varLoop).addMBB(loopMBB)
8683     .addReg(varEnd).addMBB(entryBB);
8684   BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
8685     .addReg(srcLoop).addMBB(loopMBB)
8686     .addReg(src).addMBB(entryBB);
8687   BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
8688     .addReg(destLoop).addMBB(loopMBB)
8689     .addReg(dest).addMBB(entryBB);
8690 
8691   //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
8692   //   [destLoop] = STR_POST(scratch, destPhi, UnitSiz)
8693   unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
8694   emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
8695              IsThumb1, IsThumb2);
8696   emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
8697              IsThumb1, IsThumb2);
8698 
8699   // Decrement loop variable by UnitSize.
8700   if (IsThumb1) {
8701     BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop)
8702         .add(t1CondCodeOp())
8703         .addReg(varPhi)
8704         .addImm(UnitSize)
8705         .add(predOps(ARMCC::AL));
8706   } else {
8707     MachineInstrBuilder MIB =
8708         BuildMI(*BB, BB->end(), dl,
8709                 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
8710     MIB.addReg(varPhi)
8711         .addImm(UnitSize)
8712         .add(predOps(ARMCC::AL))
8713         .add(condCodeOp());
8714     MIB->getOperand(5).setReg(ARM::CPSR);
8715     MIB->getOperand(5).setIsDef(true);
8716   }
8717   BuildMI(*BB, BB->end(), dl,
8718           TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
8719       .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
8720 
8721   // loopMBB can loop back to loopMBB or fall through to exitMBB.
8722   BB->addSuccessor(loopMBB);
8723   BB->addSuccessor(exitMBB);
8724 
8725   // Add epilogue to handle BytesLeft.
8726   BB = exitMBB;
8727   auto StartOfExit = exitMBB->begin();
8728 
8729   //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
8730   //   [destOut] = STRB_POST(scratch, destLoop, 1)
8731   unsigned srcIn = srcLoop;
8732   unsigned destIn = destLoop;
8733   for (unsigned i = 0; i < BytesLeft; i++) {
8734     unsigned srcOut = MRI.createVirtualRegister(TRC);
8735     unsigned destOut = MRI.createVirtualRegister(TRC);
8736     unsigned scratch = MRI.createVirtualRegister(TRC);
8737     emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
8738                IsThumb1, IsThumb2);
8739     emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
8740                IsThumb1, IsThumb2);
8741     srcIn = srcOut;
8742     destIn = destOut;
8743   }
8744 
8745   MI.eraseFromParent(); // The instruction is gone now.
8746   return BB;
8747 }
8748 
8749 MachineBasicBlock *
8750 ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
8751                                        MachineBasicBlock *MBB) const {
8752   const TargetMachine &TM = getTargetMachine();
8753   const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
8754   DebugLoc DL = MI.getDebugLoc();
8755 
8756   assert(Subtarget->isTargetWindows() &&
8757          "__chkstk is only supported on Windows");
8758   assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
8759 
8760   // __chkstk takes the number of words to allocate on the stack in R4, and
8761   // returns the stack adjustment in number of bytes in R4.  This will not
8762   // clober any other registers (other than the obvious lr).
8763   //
8764   // Although, technically, IP should be considered a register which may be
8765   // clobbered, the call itself will not touch it.  Windows on ARM is a pure
8766   // thumb-2 environment, so there is no interworking required.  As a result, we
8767   // do not expect a veneer to be emitted by the linker, clobbering IP.
8768   //
8769   // Each module receives its own copy of __chkstk, so no import thunk is
8770   // required, again, ensuring that IP is not clobbered.
8771   //
8772   // Finally, although some linkers may theoretically provide a trampoline for
8773   // out of range calls (which is quite common due to a 32M range limitation of
8774   // branches for Thumb), we can generate the long-call version via
8775   // -mcmodel=large, alleviating the need for the trampoline which may clobber
8776   // IP.
8777 
8778   switch (TM.getCodeModel()) {
8779   case CodeModel::Small:
8780   case CodeModel::Medium:
8781   case CodeModel::Default:
8782   case CodeModel::Kernel:
8783     BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
8784         .add(predOps(ARMCC::AL))
8785         .addExternalSymbol("__chkstk")
8786         .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
8787         .addReg(ARM::R4, RegState::Implicit | RegState::Define)
8788         .addReg(ARM::R12,
8789                 RegState::Implicit | RegState::Define | RegState::Dead);
8790     break;
8791   case CodeModel::Large:
8792   case CodeModel::JITDefault: {
8793     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
8794     unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
8795 
8796     BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
8797       .addExternalSymbol("__chkstk");
8798     BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr))
8799         .add(predOps(ARMCC::AL))
8800         .addReg(Reg, RegState::Kill)
8801         .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
8802         .addReg(ARM::R4, RegState::Implicit | RegState::Define)
8803         .addReg(ARM::R12,
8804                 RegState::Implicit | RegState::Define | RegState::Dead);
8805     break;
8806   }
8807   }
8808 
8809   BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP)
8810       .addReg(ARM::SP, RegState::Kill)
8811       .addReg(ARM::R4, RegState::Kill)
8812       .setMIFlags(MachineInstr::FrameSetup)
8813       .add(predOps(ARMCC::AL))
8814       .add(condCodeOp());
8815 
8816   MI.eraseFromParent();
8817   return MBB;
8818 }
8819 
8820 MachineBasicBlock *
8821 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
8822                                        MachineBasicBlock *MBB) const {
8823   DebugLoc DL = MI.getDebugLoc();
8824   MachineFunction *MF = MBB->getParent();
8825   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8826 
8827   MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
8828   MF->insert(++MBB->getIterator(), ContBB);
8829   ContBB->splice(ContBB->begin(), MBB,
8830                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
8831   ContBB->transferSuccessorsAndUpdatePHIs(MBB);
8832   MBB->addSuccessor(ContBB);
8833 
8834   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
8835   BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0));
8836   MF->push_back(TrapBB);
8837   MBB->addSuccessor(TrapBB);
8838 
8839   BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
8840       .addReg(MI.getOperand(0).getReg())
8841       .addImm(0)
8842       .add(predOps(ARMCC::AL));
8843   BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc))
8844       .addMBB(TrapBB)
8845       .addImm(ARMCC::EQ)
8846       .addReg(ARM::CPSR);
8847 
8848   MI.eraseFromParent();
8849   return ContBB;
8850 }
8851 
8852 MachineBasicBlock *
8853 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
8854                                                MachineBasicBlock *BB) const {
8855   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8856   DebugLoc dl = MI.getDebugLoc();
8857   bool isThumb2 = Subtarget->isThumb2();
8858   switch (MI.getOpcode()) {
8859   default: {
8860     MI.print(errs());
8861     llvm_unreachable("Unexpected instr type to insert");
8862   }
8863 
8864   // Thumb1 post-indexed loads are really just single-register LDMs.
8865   case ARM::tLDR_postidx: {
8866     BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD))
8867         .add(MI.getOperand(1))  // Rn_wb
8868         .add(MI.getOperand(2))  // Rn
8869         .add(MI.getOperand(3))  // PredImm
8870         .add(MI.getOperand(4))  // PredReg
8871         .add(MI.getOperand(0)); // Rt
8872     MI.eraseFromParent();
8873     return BB;
8874   }
8875 
8876   // The Thumb2 pre-indexed stores have the same MI operands, they just
8877   // define them differently in the .td files from the isel patterns, so
8878   // they need pseudos.
8879   case ARM::t2STR_preidx:
8880     MI.setDesc(TII->get(ARM::t2STR_PRE));
8881     return BB;
8882   case ARM::t2STRB_preidx:
8883     MI.setDesc(TII->get(ARM::t2STRB_PRE));
8884     return BB;
8885   case ARM::t2STRH_preidx:
8886     MI.setDesc(TII->get(ARM::t2STRH_PRE));
8887     return BB;
8888 
8889   case ARM::STRi_preidx:
8890   case ARM::STRBi_preidx: {
8891     unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
8892                                                          : ARM::STRB_PRE_IMM;
8893     // Decode the offset.
8894     unsigned Offset = MI.getOperand(4).getImm();
8895     bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
8896     Offset = ARM_AM::getAM2Offset(Offset);
8897     if (isSub)
8898       Offset = -Offset;
8899 
8900     MachineMemOperand *MMO = *MI.memoperands_begin();
8901     BuildMI(*BB, MI, dl, TII->get(NewOpc))
8902         .add(MI.getOperand(0)) // Rn_wb
8903         .add(MI.getOperand(1)) // Rt
8904         .add(MI.getOperand(2)) // Rn
8905         .addImm(Offset)        // offset (skip GPR==zero_reg)
8906         .add(MI.getOperand(5)) // pred
8907         .add(MI.getOperand(6))
8908         .addMemOperand(MMO);
8909     MI.eraseFromParent();
8910     return BB;
8911   }
8912   case ARM::STRr_preidx:
8913   case ARM::STRBr_preidx:
8914   case ARM::STRH_preidx: {
8915     unsigned NewOpc;
8916     switch (MI.getOpcode()) {
8917     default: llvm_unreachable("unexpected opcode!");
8918     case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
8919     case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
8920     case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
8921     }
8922     MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
8923     for (unsigned i = 0; i < MI.getNumOperands(); ++i)
8924       MIB.add(MI.getOperand(i));
8925     MI.eraseFromParent();
8926     return BB;
8927   }
8928 
8929   case ARM::tMOVCCr_pseudo: {
8930     // To "insert" a SELECT_CC instruction, we actually have to insert the
8931     // diamond control-flow pattern.  The incoming instruction knows the
8932     // destination vreg to set, the condition code register to branch on, the
8933     // true/false values to select between, and a branch opcode to use.
8934     const BasicBlock *LLVM_BB = BB->getBasicBlock();
8935     MachineFunction::iterator It = ++BB->getIterator();
8936 
8937     //  thisMBB:
8938     //  ...
8939     //   TrueVal = ...
8940     //   cmpTY ccX, r1, r2
8941     //   bCC copy1MBB
8942     //   fallthrough --> copy0MBB
8943     MachineBasicBlock *thisMBB  = BB;
8944     MachineFunction *F = BB->getParent();
8945     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
8946     MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
8947     F->insert(It, copy0MBB);
8948     F->insert(It, sinkMBB);
8949 
8950     // Transfer the remainder of BB and its successor edges to sinkMBB.
8951     sinkMBB->splice(sinkMBB->begin(), BB,
8952                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
8953     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
8954 
8955     BB->addSuccessor(copy0MBB);
8956     BB->addSuccessor(sinkMBB);
8957 
8958     BuildMI(BB, dl, TII->get(ARM::tBcc))
8959         .addMBB(sinkMBB)
8960         .addImm(MI.getOperand(3).getImm())
8961         .addReg(MI.getOperand(4).getReg());
8962 
8963     //  copy0MBB:
8964     //   %FalseValue = ...
8965     //   # fallthrough to sinkMBB
8966     BB = copy0MBB;
8967 
8968     // Update machine-CFG edges
8969     BB->addSuccessor(sinkMBB);
8970 
8971     //  sinkMBB:
8972     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
8973     //  ...
8974     BB = sinkMBB;
8975     BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
8976         .addReg(MI.getOperand(1).getReg())
8977         .addMBB(copy0MBB)
8978         .addReg(MI.getOperand(2).getReg())
8979         .addMBB(thisMBB);
8980 
8981     MI.eraseFromParent(); // The pseudo instruction is gone now.
8982     return BB;
8983   }
8984 
8985   case ARM::BCCi64:
8986   case ARM::BCCZi64: {
8987     // If there is an unconditional branch to the other successor, remove it.
8988     BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());
8989 
8990     // Compare both parts that make up the double comparison separately for
8991     // equality.
8992     bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;
8993 
8994     unsigned LHS1 = MI.getOperand(1).getReg();
8995     unsigned LHS2 = MI.getOperand(2).getReg();
8996     if (RHSisZero) {
8997       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8998           .addReg(LHS1)
8999           .addImm(0)
9000           .add(predOps(ARMCC::AL));
9001       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
9002         .addReg(LHS2).addImm(0)
9003         .addImm(ARMCC::EQ).addReg(ARM::CPSR);
9004     } else {
9005       unsigned RHS1 = MI.getOperand(3).getReg();
9006       unsigned RHS2 = MI.getOperand(4).getReg();
9007       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
9008           .addReg(LHS1)
9009           .addReg(RHS1)
9010           .add(predOps(ARMCC::AL));
9011       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
9012         .addReg(LHS2).addReg(RHS2)
9013         .addImm(ARMCC::EQ).addReg(ARM::CPSR);
9014     }
9015 
9016     MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
9017     MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
9018     if (MI.getOperand(0).getImm() == ARMCC::NE)
9019       std::swap(destMBB, exitMBB);
9020 
9021     BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
9022       .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
9023     if (isThumb2)
9024       BuildMI(BB, dl, TII->get(ARM::t2B))
9025           .addMBB(exitMBB)
9026           .add(predOps(ARMCC::AL));
9027     else
9028       BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB);
9029 
9030     MI.eraseFromParent(); // The pseudo instruction is gone now.
9031     return BB;
9032   }
9033 
9034   case ARM::Int_eh_sjlj_setjmp:
9035   case ARM::Int_eh_sjlj_setjmp_nofp:
9036   case ARM::tInt_eh_sjlj_setjmp:
9037   case ARM::t2Int_eh_sjlj_setjmp:
9038   case ARM::t2Int_eh_sjlj_setjmp_nofp:
9039     return BB;
9040 
9041   case ARM::Int_eh_sjlj_setup_dispatch:
9042     EmitSjLjDispatchBlock(MI, BB);
9043     return BB;
9044 
9045   case ARM::ABS:
9046   case ARM::t2ABS: {
9047     // To insert an ABS instruction, we have to insert the
9048     // diamond control-flow pattern.  The incoming instruction knows the
9049     // source vreg to test against 0, the destination vreg to set,
9050     // the condition code register to branch on, the
9051     // true/false values to select between, and a branch opcode to use.
9052     // It transforms
9053     //     V1 = ABS V0
9054     // into
9055     //     V2 = MOVS V0
9056     //     BCC                      (branch to SinkBB if V0 >= 0)
9057     //     RSBBB: V3 = RSBri V2, 0  (compute ABS if V2 < 0)
9058     //     SinkBB: V1 = PHI(V2, V3)
9059     const BasicBlock *LLVM_BB = BB->getBasicBlock();
9060     MachineFunction::iterator BBI = ++BB->getIterator();
9061     MachineFunction *Fn = BB->getParent();
9062     MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
9063     MachineBasicBlock *SinkBB  = Fn->CreateMachineBasicBlock(LLVM_BB);
9064     Fn->insert(BBI, RSBBB);
9065     Fn->insert(BBI, SinkBB);
9066 
9067     unsigned int ABSSrcReg = MI.getOperand(1).getReg();
9068     unsigned int ABSDstReg = MI.getOperand(0).getReg();
9069     bool ABSSrcKIll = MI.getOperand(1).isKill();
9070     bool isThumb2 = Subtarget->isThumb2();
9071     MachineRegisterInfo &MRI = Fn->getRegInfo();
9072     // In Thumb mode S must not be specified if source register is the SP or
9073     // PC and if destination register is the SP, so restrict register class
9074     unsigned NewRsbDstReg =
9075       MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);
9076 
9077     // Transfer the remainder of BB and its successor edges to sinkMBB.
9078     SinkBB->splice(SinkBB->begin(), BB,
9079                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
9080     SinkBB->transferSuccessorsAndUpdatePHIs(BB);
9081 
9082     BB->addSuccessor(RSBBB);
9083     BB->addSuccessor(SinkBB);
9084 
9085     // fall through to SinkMBB
9086     RSBBB->addSuccessor(SinkBB);
9087 
9088     // insert a cmp at the end of BB
9089     BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
9090         .addReg(ABSSrcReg)
9091         .addImm(0)
9092         .add(predOps(ARMCC::AL));
9093 
9094     // insert a bcc with opposite CC to ARMCC::MI at the end of BB
9095     BuildMI(BB, dl,
9096       TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
9097       .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);
9098 
9099     // insert rsbri in RSBBB
9100     // Note: BCC and rsbri will be converted into predicated rsbmi
9101     // by if-conversion pass
9102     BuildMI(*RSBBB, RSBBB->begin(), dl,
9103             TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
9104         .addReg(ABSSrcReg, ABSSrcKIll ? RegState::Kill : 0)
9105         .addImm(0)
9106         .add(predOps(ARMCC::AL))
9107         .add(condCodeOp());
9108 
9109     // insert PHI in SinkBB,
9110     // reuse ABSDstReg to not change uses of ABS instruction
9111     BuildMI(*SinkBB, SinkBB->begin(), dl,
9112       TII->get(ARM::PHI), ABSDstReg)
9113       .addReg(NewRsbDstReg).addMBB(RSBBB)
9114       .addReg(ABSSrcReg).addMBB(BB);
9115 
9116     // remove ABS instruction
9117     MI.eraseFromParent();
9118 
9119     // return last added BB
9120     return SinkBB;
9121   }
9122   case ARM::COPY_STRUCT_BYVAL_I32:
9123     ++NumLoopByVals;
9124     return EmitStructByval(MI, BB);
9125   case ARM::WIN__CHKSTK:
9126     return EmitLowered__chkstk(MI, BB);
9127   case ARM::WIN__DBZCHK:
9128     return EmitLowered__dbzchk(MI, BB);
9129   }
9130 }
9131 
9132 /// \brief Attaches vregs to MEMCPY that it will use as scratch registers
9133 /// when it is expanded into LDM/STM. This is done as a post-isel lowering
9134 /// instead of as a custom inserter because we need the use list from the SDNode.
9135 static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
9136                                     MachineInstr &MI, const SDNode *Node) {
9137   bool isThumb1 = Subtarget->isThumb1Only();
9138 
9139   DebugLoc DL = MI.getDebugLoc();
9140   MachineFunction *MF = MI.getParent()->getParent();
9141   MachineRegisterInfo &MRI = MF->getRegInfo();
9142   MachineInstrBuilder MIB(*MF, MI);
9143 
9144   // If the new dst/src is unused mark it as dead.
9145   if (!Node->hasAnyUseOfValue(0)) {
9146     MI.getOperand(0).setIsDead(true);
9147   }
9148   if (!Node->hasAnyUseOfValue(1)) {
9149     MI.getOperand(1).setIsDead(true);
9150   }
9151 
9152   // The MEMCPY both defines and kills the scratch registers.
9153   for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
9154     unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
9155                                                          : &ARM::GPRRegClass);
9156     MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
9157   }
9158 }
9159 
9160 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9161                                                       SDNode *Node) const {
9162   if (MI.getOpcode() == ARM::MEMCPY) {
9163     attachMEMCPYScratchRegs(Subtarget, MI, Node);
9164     return;
9165   }
9166 
9167   const MCInstrDesc *MCID = &MI.getDesc();
9168   // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
9169   // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
9170   // operand is still set to noreg. If needed, set the optional operand's
9171   // register to CPSR, and remove the redundant implicit def.
9172   //
9173   // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).
9174 
9175   // Rename pseudo opcodes.
9176   unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
9177   unsigned ccOutIdx;
9178   if (NewOpc) {
9179     const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
9180     MCID = &TII->get(NewOpc);
9181 
9182     assert(MCID->getNumOperands() ==
9183            MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
9184         && "converted opcode should be the same except for cc_out"
9185            " (and, on Thumb1, pred)");
9186 
9187     MI.setDesc(*MCID);
9188 
9189     // Add the optional cc_out operand
9190     MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
9191 
9192     // On Thumb1, move all input operands to the end, then add the predicate
9193     if (Subtarget->isThumb1Only()) {
9194       for (unsigned c = MCID->getNumOperands() - 4; c--;) {
9195         MI.addOperand(MI.getOperand(1));
9196         MI.RemoveOperand(1);
9197       }
9198 
9199       // Restore the ties
9200       for (unsigned i = MI.getNumOperands(); i--;) {
9201         const MachineOperand& op = MI.getOperand(i);
9202         if (op.isReg() && op.isUse()) {
9203           int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
9204           if (DefIdx != -1)
9205             MI.tieOperands(DefIdx, i);
9206         }
9207       }
9208 
9209       MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
9210       MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
9211       ccOutIdx = 1;
9212     } else
9213       ccOutIdx = MCID->getNumOperands() - 1;
9214   } else
9215     ccOutIdx = MCID->getNumOperands() - 1;
9216 
9217   // Any ARM instruction that sets the 's' bit should specify an optional
9218   // "cc_out" operand in the last operand position.
9219   if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
9220     assert(!NewOpc && "Optional cc_out operand required");
9221     return;
9222   }
9223   // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
9224   // since we already have an optional CPSR def.
9225   bool definesCPSR = false;
9226   bool deadCPSR = false;
9227   for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
9228        ++i) {
9229     const MachineOperand &MO = MI.getOperand(i);
9230     if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
9231       definesCPSR = true;
9232       if (MO.isDead())
9233         deadCPSR = true;
9234       MI.RemoveOperand(i);
9235       break;
9236     }
9237   }
9238   if (!definesCPSR) {
9239     assert(!NewOpc && "Optional cc_out operand required");
9240     return;
9241   }
9242   assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
9243   if (deadCPSR) {
9244     assert(!MI.getOperand(ccOutIdx).getReg() &&
9245            "expect uninitialized optional cc_out operand");
9246     // Thumb1 instructions must have the S bit even if the CPSR is dead.
9247     if (!Subtarget->isThumb1Only())
9248       return;
9249   }
9250 
9251   // If this instruction was defined with an optional CPSR def and its dag node
9252   // had a live implicit CPSR def, then activate the optional CPSR def.
9253   MachineOperand &MO = MI.getOperand(ccOutIdx);
9254   MO.setReg(ARM::CPSR);
9255   MO.setIsDef(true);
9256 }
9257 
9258 //===----------------------------------------------------------------------===//
9259 //                           ARM Optimization Hooks
9260 //===----------------------------------------------------------------------===//
9261 
9262 // Helper function that checks if N is a null or all ones constant.
9263 static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
9264   return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
9265 }
9266 
9267 // Return true if N is conditionally 0 or all ones.
9268 // Detects these expressions where cc is an i1 value:
9269 //
9270 //   (select cc 0, y)   [AllOnes=0]
9271 //   (select cc y, 0)   [AllOnes=0]
9272 //   (zext cc)          [AllOnes=0]
9273 //   (sext cc)          [AllOnes=0/1]
9274 //   (select cc -1, y)  [AllOnes=1]
9275 //   (select cc y, -1)  [AllOnes=1]
9276 //
9277 // Invert is set when N is the null/all ones constant when CC is false.
9278 // OtherOp is set to the alternative value of N.
9279 static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
9280                                        SDValue &CC, bool &Invert,
9281                                        SDValue &OtherOp,
9282                                        SelectionDAG &DAG) {
9283   switch (N->getOpcode()) {
9284   default: return false;
9285   case ISD::SELECT: {
9286     CC = N->getOperand(0);
9287     SDValue N1 = N->getOperand(1);
9288     SDValue N2 = N->getOperand(2);
9289     if (isZeroOrAllOnes(N1, AllOnes)) {
9290       Invert = false;
9291       OtherOp = N2;
9292       return true;
9293     }
9294     if (isZeroOrAllOnes(N2, AllOnes)) {
9295       Invert = true;
9296       OtherOp = N1;
9297       return true;
9298     }
9299     return false;
9300   }
9301   case ISD::ZERO_EXTEND:
9302     // (zext cc) can never be the all ones value.
9303     if (AllOnes)
9304       return false;
9305     LLVM_FALLTHROUGH;
9306   case ISD::SIGN_EXTEND: {
9307     SDLoc dl(N);
9308     EVT VT = N->getValueType(0);
9309     CC = N->getOperand(0);
9310     if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC)
9311       return false;
9312     Invert = !AllOnes;
9313     if (AllOnes)
9314       // When looking for an AllOnes constant, N is an sext, and the 'other'
9315       // value is 0.
9316       OtherOp = DAG.getConstant(0, dl, VT);
9317     else if (N->getOpcode() == ISD::ZERO_EXTEND)
9318       // When looking for a 0 constant, N can be zext or sext.
9319       OtherOp = DAG.getConstant(1, dl, VT);
9320     else
9321       OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
9322                                 VT);
9323     return true;
9324   }
9325   }
9326 }
9327 
9328 // Combine a constant select operand into its use:
9329 //
9330 //   (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
9331 //   (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
9332 //   (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
9333 //   (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
9334 //   (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
9335 //
9336 // The transform is rejected if the select doesn't have a constant operand that
9337 // is null, or all ones when AllOnes is set.
9338 //
9339 // Also recognize sext/zext from i1:
9340 //
9341 //   (add (zext cc), x) -> (select cc (add x, 1), x)
9342 //   (add (sext cc), x) -> (select cc (add x, -1), x)
9343 //
9344 // These transformations eventually create predicated instructions.
9345 //
9346 // @param N       The node to transform.
9347 // @param Slct    The N operand that is a select.
9348 // @param OtherOp The other N operand (x above).
9349 // @param DCI     Context.
9350 // @param AllOnes Require the select constant to be all ones instead of null.
9351 // @returns The new node, or SDValue() on failure.
9352 static
9353 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
9354                             TargetLowering::DAGCombinerInfo &DCI,
9355                             bool AllOnes = false) {
9356   SelectionDAG &DAG = DCI.DAG;
9357   EVT VT = N->getValueType(0);
9358   SDValue NonConstantVal;
9359   SDValue CCOp;
9360   bool SwapSelectOps;
9361   if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
9362                                   NonConstantVal, DAG))
9363     return SDValue();
9364 
9365   // Slct is now know to be the desired identity constant when CC is true.
9366   SDValue TrueVal = OtherOp;
9367   SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
9368                                  OtherOp, NonConstantVal);
9369   // Unless SwapSelectOps says CC should be false.
9370   if (SwapSelectOps)
9371     std::swap(TrueVal, FalseVal);
9372 
9373   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
9374                      CCOp, TrueVal, FalseVal);
9375 }
9376 
9377 // Attempt combineSelectAndUse on each operand of a commutative operator N.
9378 static
9379 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
9380                                        TargetLowering::DAGCombinerInfo &DCI) {
9381   SDValue N0 = N->getOperand(0);
9382   SDValue N1 = N->getOperand(1);
9383   if (N0.getNode()->hasOneUse())
9384     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
9385       return Result;
9386   if (N1.getNode()->hasOneUse())
9387     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
9388       return Result;
9389   return SDValue();
9390 }
9391 
9392 static bool IsVUZPShuffleNode(SDNode *N) {
9393   // VUZP shuffle node.
9394   if (N->getOpcode() == ARMISD::VUZP)
9395     return true;
9396 
9397   // "VUZP" on i32 is an alias for VTRN.
9398   if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
9399     return true;
9400 
9401   return false;
9402 }
9403 
9404 static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
9405                                  TargetLowering::DAGCombinerInfo &DCI,
9406                                  const ARMSubtarget *Subtarget) {
9407   // Look for ADD(VUZP.0, VUZP.1).
9408   if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
9409       N0 == N1)
9410    return SDValue();
9411 
9412   // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
9413   if (!N->getValueType(0).is64BitVector())
9414     return SDValue();
9415 
9416   // Generate vpadd.
9417   SelectionDAG &DAG = DCI.DAG;
9418   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9419   SDLoc dl(N);
9420   SDNode *Unzip = N0.getNode();
9421   EVT VT = N->getValueType(0);
9422 
9423   SmallVector<SDValue, 8> Ops;
9424   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
9425                                 TLI.getPointerTy(DAG.getDataLayout())));
9426   Ops.push_back(Unzip->getOperand(0));
9427   Ops.push_back(Unzip->getOperand(1));
9428 
9429   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
9430 }
9431 
9432 static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
9433                                       TargetLowering::DAGCombinerInfo &DCI,
9434                                       const ARMSubtarget *Subtarget) {
9435   // Check for two extended operands.
9436   if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
9437         N1.getOpcode() == ISD::SIGN_EXTEND) &&
9438       !(N0.getOpcode() == ISD::ZERO_EXTEND &&
9439         N1.getOpcode() == ISD::ZERO_EXTEND))
9440     return SDValue();
9441 
9442   SDValue N00 = N0.getOperand(0);
9443   SDValue N10 = N1.getOperand(0);
9444 
9445   // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1))
9446   if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
9447       N00 == N10)
9448     return SDValue();
9449 
9450   // We only recognize Q register paddl here; this can't be reached until
9451   // after type legalization.
9452   if (!N00.getValueType().is64BitVector() ||
9453       !N0.getValueType().is128BitVector())
9454     return SDValue();
9455 
9456   // Generate vpaddl.
9457   SelectionDAG &DAG = DCI.DAG;
9458   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9459   SDLoc dl(N);
9460   EVT VT = N->getValueType(0);
9461 
9462   SmallVector<SDValue, 8> Ops;
9463   // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
9464   unsigned Opcode;
9465   if (N0.getOpcode() == ISD::SIGN_EXTEND)
9466     Opcode = Intrinsic::arm_neon_vpaddls;
9467   else
9468     Opcode = Intrinsic::arm_neon_vpaddlu;
9469   Ops.push_back(DAG.getConstant(Opcode, dl,
9470                                 TLI.getPointerTy(DAG.getDataLayout())));
9471   EVT ElemTy = N00.getValueType().getVectorElementType();
9472   unsigned NumElts = VT.getVectorNumElements();
9473   EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
9474   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
9475                                N00.getOperand(0), N00.getOperand(1));
9476   Ops.push_back(Concat);
9477 
9478   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
9479 }
9480 
9481 // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
9482 // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
9483 // much easier to match.
9484 static SDValue
9485 AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
9486                                TargetLowering::DAGCombinerInfo &DCI,
9487                                const ARMSubtarget *Subtarget) {
9488   // Only perform optimization if after legalize, and if NEON is available. We
9489   // also expected both operands to be BUILD_VECTORs.
9490   if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
9491       || N0.getOpcode() != ISD::BUILD_VECTOR
9492       || N1.getOpcode() != ISD::BUILD_VECTOR)
9493     return SDValue();
9494 
9495   // Check output type since VPADDL operand elements can only be 8, 16, or 32.
9496   EVT VT = N->getValueType(0);
9497   if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
9498     return SDValue();
9499 
9500   // Check that the vector operands are of the right form.
9501   // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
9502   // operands, where N is the size of the formed vector.
9503   // Each EXTRACT_VECTOR should have the same input vector and odd or even
9504   // index such that we have a pair wise add pattern.
9505 
9506   // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
9507   if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9508     return SDValue();
9509   SDValue Vec = N0->getOperand(0)->getOperand(0);
9510   SDNode *V = Vec.getNode();
9511   unsigned nextIndex = 0;
9512 
9513   // For each operands to the ADD which are BUILD_VECTORs,
9514   // check to see if each of their operands are an EXTRACT_VECTOR with
9515   // the same vector and appropriate index.
9516   for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
9517     if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
9518         && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
9519 
9520       SDValue ExtVec0 = N0->getOperand(i);
9521       SDValue ExtVec1 = N1->getOperand(i);
9522 
9523       // First operand is the vector, verify its the same.
9524       if (V != ExtVec0->getOperand(0).getNode() ||
9525           V != ExtVec1->getOperand(0).getNode())
9526         return SDValue();
9527 
9528       // Second is the constant, verify its correct.
9529       ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
9530       ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
9531 
9532       // For the constant, we want to see all the even or all the odd.
9533       if (!C0 || !C1 || C0->getZExtValue() != nextIndex
9534           || C1->getZExtValue() != nextIndex+1)
9535         return SDValue();
9536 
9537       // Increment index.
9538       nextIndex+=2;
9539     } else
9540       return SDValue();
9541   }
9542 
9543   // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure
9544   // we're using the entire input vector, otherwise there's a size/legality
9545   // mismatch somewhere.
9546   if (nextIndex != Vec.getValueType().getVectorNumElements() ||
9547       Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
9548     return SDValue();
9549 
9550   // Create VPADDL node.
9551   SelectionDAG &DAG = DCI.DAG;
9552   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9553 
9554   SDLoc dl(N);
9555 
9556   // Build operand list.
9557   SmallVector<SDValue, 8> Ops;
9558   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
9559                                 TLI.getPointerTy(DAG.getDataLayout())));
9560 
9561   // Input is the vector.
9562   Ops.push_back(Vec);
9563 
9564   // Get widened type and narrowed type.
9565   MVT widenType;
9566   unsigned numElem = VT.getVectorNumElements();
9567 
9568   EVT inputLaneType = Vec.getValueType().getVectorElementType();
9569   switch (inputLaneType.getSimpleVT().SimpleTy) {
9570     case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
9571     case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
9572     case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
9573     default:
9574       llvm_unreachable("Invalid vector element type for padd optimization.");
9575   }
9576 
9577   SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
9578   unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
9579   return DAG.getNode(ExtOp, dl, VT, tmp);
9580 }
9581 
9582 static SDValue findMUL_LOHI(SDValue V) {
9583   if (V->getOpcode() == ISD::UMUL_LOHI ||
9584       V->getOpcode() == ISD::SMUL_LOHI)
9585     return V;
9586   return SDValue();
9587 }
9588 
9589 static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
9590                                         TargetLowering::DAGCombinerInfo &DCI,
9591                                         const ARMSubtarget *Subtarget) {
9592 
9593   if (Subtarget->isThumb()) {
9594     if (!Subtarget->hasDSP())
9595       return SDValue();
9596   } else if (!Subtarget->hasV5TEOps())
9597     return SDValue();
9598 
9599   // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
9600   // accumulates the product into a 64-bit value. The 16-bit values will
9601   // be sign extended somehow or SRA'd into 32-bit values
9602   // (addc (adde (mul 16bit, 16bit), lo), hi)
9603   SDValue Mul = AddcNode->getOperand(0);
9604   SDValue Lo = AddcNode->getOperand(1);
9605   if (Mul.getOpcode() != ISD::MUL) {
9606     Lo = AddcNode->getOperand(0);
9607     Mul = AddcNode->getOperand(1);
9608     if (Mul.getOpcode() != ISD::MUL)
9609       return SDValue();
9610   }
9611 
9612   SDValue SRA = AddeNode->getOperand(0);
9613   SDValue Hi = AddeNode->getOperand(1);
9614   if (SRA.getOpcode() != ISD::SRA) {
9615     SRA = AddeNode->getOperand(1);
9616     Hi = AddeNode->getOperand(0);
9617     if (SRA.getOpcode() != ISD::SRA)
9618       return SDValue();
9619   }
9620   if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
9621     if (Const->getZExtValue() != 31)
9622       return SDValue();
9623   } else
9624     return SDValue();
9625 
9626   if (SRA.getOperand(0) != Mul)
9627     return SDValue();
9628 
9629   SelectionDAG &DAG = DCI.DAG;
9630   SDLoc dl(AddcNode);
9631   unsigned Opcode = 0;
9632   SDValue Op0;
9633   SDValue Op1;
9634 
9635   if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) {
9636     Opcode = ARMISD::SMLALBB;
9637     Op0 = Mul.getOperand(0);
9638     Op1 = Mul.getOperand(1);
9639   } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) {
9640     Opcode = ARMISD::SMLALBT;
9641     Op0 = Mul.getOperand(0);
9642     Op1 = Mul.getOperand(1).getOperand(0);
9643   } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) {
9644     Opcode = ARMISD::SMLALTB;
9645     Op0 = Mul.getOperand(0).getOperand(0);
9646     Op1 = Mul.getOperand(1);
9647   } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) {
9648     Opcode = ARMISD::SMLALTT;
9649     Op0 = Mul->getOperand(0).getOperand(0);
9650     Op1 = Mul->getOperand(1).getOperand(0);
9651   }
9652 
9653   if (!Op0 || !Op1)
9654     return SDValue();
9655 
9656   SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
9657                               Op0, Op1, Lo, Hi);
9658   // Replace the ADDs' nodes uses by the MLA node's values.
9659   SDValue HiMLALResult(SMLAL.getNode(), 1);
9660   SDValue LoMLALResult(SMLAL.getNode(), 0);
9661 
9662   DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
9663   DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);
9664 
9665   // Return original node to notify the driver to stop replacing.
9666   SDValue resNode(AddcNode, 0);
9667   return resNode;
9668 }
9669 
9670 static SDValue AddCombineTo64bitMLAL(SDNode *AddeNode,
9671                                      TargetLowering::DAGCombinerInfo &DCI,
9672                                      const ARMSubtarget *Subtarget) {
9673   // Look for multiply add opportunities.
9674   // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
9675   // each add nodes consumes a value from ISD::UMUL_LOHI and there is
9676   // a glue link from the first add to the second add.
9677   // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
9678   // a S/UMLAL instruction.
9679   //                  UMUL_LOHI
9680   //                 / :lo    \ :hi
9681   //                /          \          [no multiline comment]
9682   //    loAdd ->  ADDE         |
9683   //                 \ :glue  /
9684   //                  \      /
9685   //                    ADDC   <- hiAdd
9686   //
9687   assert(AddeNode->getOpcode() == ARMISD::ADDE && "Expect an ADDE");
9688 
9689   assert(AddeNode->getNumOperands() == 3 &&
9690          AddeNode->getOperand(2).getValueType() == MVT::i32 &&
9691          "ADDE node has the wrong inputs");
9692 
9693   // Check that we have a glued ADDC node.
9694   SDNode* AddcNode = AddeNode->getOperand(2).getNode();
9695   if (AddcNode->getOpcode() != ARMISD::ADDC)
9696     return SDValue();
9697 
9698   SDValue AddcOp0 = AddcNode->getOperand(0);
9699   SDValue AddcOp1 = AddcNode->getOperand(1);
9700 
9701   // Check if the two operands are from the same mul_lohi node.
9702   if (AddcOp0.getNode() == AddcOp1.getNode())
9703     return SDValue();
9704 
9705   assert(AddcNode->getNumValues() == 2 &&
9706          AddcNode->getValueType(0) == MVT::i32 &&
9707          "Expect ADDC with two result values. First: i32");
9708 
9709   // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
9710   // maybe a SMLAL which multiplies two 16-bit values.
9711   if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
9712       AddcOp0->getOpcode() != ISD::SMUL_LOHI &&
9713       AddcOp1->getOpcode() != ISD::UMUL_LOHI &&
9714       AddcOp1->getOpcode() != ISD::SMUL_LOHI)
9715     return AddCombineTo64BitSMLAL16(AddcNode, AddeNode, DCI, Subtarget);
9716 
9717   // Check for the triangle shape.
9718   SDValue AddeOp0 = AddeNode->getOperand(0);
9719   SDValue AddeOp1 = AddeNode->getOperand(1);
9720 
9721   // Make sure that the ADDE operands are not coming from the same node.
9722   if (AddeOp0.getNode() == AddeOp1.getNode())
9723     return SDValue();
9724 
9725   // Find the MUL_LOHI node walking up ADDE's operands.
9726   bool IsLeftOperandMUL = false;
9727   SDValue MULOp = findMUL_LOHI(AddeOp0);
9728   if (MULOp == SDValue())
9729    MULOp = findMUL_LOHI(AddeOp1);
9730   else
9731     IsLeftOperandMUL = true;
9732   if (MULOp == SDValue())
9733     return SDValue();
9734 
9735   // Figure out the right opcode.
9736   unsigned Opc = MULOp->getOpcode();
9737   unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;
9738 
9739   // Figure out the high and low input values to the MLAL node.
9740   SDValue* HiAdd = nullptr;
9741   SDValue* LoMul = nullptr;
9742   SDValue* LowAdd = nullptr;
9743 
9744   // Ensure that ADDE is from high result of ISD::SMUL_LOHI.
9745   if ((AddeOp0 != MULOp.getValue(1)) && (AddeOp1 != MULOp.getValue(1)))
9746     return SDValue();
9747 
9748   if (IsLeftOperandMUL)
9749     HiAdd = &AddeOp1;
9750   else
9751     HiAdd = &AddeOp0;
9752 
9753 
9754   // Ensure that LoMul and LowAdd are taken from correct ISD::SMUL_LOHI node
9755   // whose low result is fed to the ADDC we are checking.
9756 
9757   if (AddcOp0 == MULOp.getValue(0)) {
9758     LoMul = &AddcOp0;
9759     LowAdd = &AddcOp1;
9760   }
9761   if (AddcOp1 == MULOp.getValue(0)) {
9762     LoMul = &AddcOp1;
9763     LowAdd = &AddcOp0;
9764   }
9765 
9766   if (!LoMul)
9767     return SDValue();
9768 
9769   // Create the merged node.
9770   SelectionDAG &DAG = DCI.DAG;
9771 
9772   // Build operand list.
9773   SmallVector<SDValue, 8> Ops;
9774   Ops.push_back(LoMul->getOperand(0));
9775   Ops.push_back(LoMul->getOperand(1));
9776   Ops.push_back(*LowAdd);
9777   Ops.push_back(*HiAdd);
9778 
9779   SDValue MLALNode =  DAG.getNode(FinalOpc, SDLoc(AddcNode),
9780                                  DAG.getVTList(MVT::i32, MVT::i32), Ops);
9781 
9782   // Replace the ADDs' nodes uses by the MLA node's values.
9783   SDValue HiMLALResult(MLALNode.getNode(), 1);
9784   DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);
9785 
9786   SDValue LoMLALResult(MLALNode.getNode(), 0);
9787   DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
9788 
9789   // Return original node to notify the driver to stop replacing.
9790   return SDValue(AddeNode, 0);
9791 }
9792 
9793 static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
9794                                       TargetLowering::DAGCombinerInfo &DCI,
9795                                       const ARMSubtarget *Subtarget) {
9796   // UMAAL is similar to UMLAL except that it adds two unsigned values.
9797   // While trying to combine for the other MLAL nodes, first search for the
9798   // chance to use UMAAL. Check if Addc uses a node which has already
9799   // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
9800   // as the addend, and it's handled in PerformUMLALCombine.
9801 
9802   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
9803     return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
9804 
9805   // Check that we have a glued ADDC node.
9806   SDNode* AddcNode = AddeNode->getOperand(2).getNode();
9807   if (AddcNode->getOpcode() != ARMISD::ADDC)
9808     return SDValue();
9809 
9810   // Find the converted UMAAL or quit if it doesn't exist.
9811   SDNode *UmlalNode = nullptr;
9812   SDValue AddHi;
9813   if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
9814     UmlalNode = AddcNode->getOperand(0).getNode();
9815     AddHi = AddcNode->getOperand(1);
9816   } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
9817     UmlalNode = AddcNode->getOperand(1).getNode();
9818     AddHi = AddcNode->getOperand(0);
9819   } else {
9820     return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
9821   }
9822 
9823   // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
9824   // the ADDC as well as Zero.
9825   if (!isNullConstant(UmlalNode->getOperand(3)))
9826     return SDValue();
9827 
9828   if ((isNullConstant(AddeNode->getOperand(0)) &&
9829        AddeNode->getOperand(1).getNode() == UmlalNode) ||
9830       (AddeNode->getOperand(0).getNode() == UmlalNode &&
9831        isNullConstant(AddeNode->getOperand(1)))) {
9832 
9833     SelectionDAG &DAG = DCI.DAG;
9834     SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
9835                       UmlalNode->getOperand(2), AddHi };
9836     SDValue UMAAL =  DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
9837                                  DAG.getVTList(MVT::i32, MVT::i32), Ops);
9838 
9839     // Replace the ADDs' nodes uses by the UMAAL node's values.
9840     DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
9841     DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));
9842 
9843     // Return original node to notify the driver to stop replacing.
9844     return SDValue(AddeNode, 0);
9845   }
9846   return SDValue();
9847 }
9848 
9849 static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
9850                                    const ARMSubtarget *Subtarget) {
9851   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
9852     return SDValue();
9853 
9854   // Check that we have a pair of ADDC and ADDE as operands.
9855   // Both addends of the ADDE must be zero.
9856   SDNode* AddcNode = N->getOperand(2).getNode();
9857   SDNode* AddeNode = N->getOperand(3).getNode();
9858   if ((AddcNode->getOpcode() == ARMISD::ADDC) &&
9859       (AddeNode->getOpcode() == ARMISD::ADDE) &&
9860       isNullConstant(AddeNode->getOperand(0)) &&
9861       isNullConstant(AddeNode->getOperand(1)) &&
9862       (AddeNode->getOperand(2).getNode() == AddcNode))
9863     return DAG.getNode(ARMISD::UMAAL, SDLoc(N),
9864                        DAG.getVTList(MVT::i32, MVT::i32),
9865                        {N->getOperand(0), N->getOperand(1),
9866                         AddcNode->getOperand(0), AddcNode->getOperand(1)});
9867   else
9868     return SDValue();
9869 }
9870 
9871 static SDValue PerformAddcSubcCombine(SDNode *N, SelectionDAG &DAG,
9872                                       const ARMSubtarget *Subtarget) {
9873   if (Subtarget->isThumb1Only()) {
9874     SDValue RHS = N->getOperand(1);
9875     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
9876       int32_t imm = C->getSExtValue();
9877       if (imm < 0 && imm > INT_MIN) {
9878         SDLoc DL(N);
9879         RHS = DAG.getConstant(-imm, DL, MVT::i32);
9880         unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
9881                                                            : ARMISD::ADDC;
9882         return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
9883       }
9884     }
9885   }
9886   return SDValue();
9887 }
9888 
9889 static SDValue PerformAddeSubeCombine(SDNode *N, SelectionDAG &DAG,
9890                                       const ARMSubtarget *Subtarget) {
9891   if (Subtarget->isThumb1Only()) {
9892     SDValue RHS = N->getOperand(1);
9893     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
9894       int64_t imm = C->getSExtValue();
9895       if (imm < 0) {
9896         SDLoc DL(N);
9897 
9898         // The with-carry-in form matches bitwise not instead of the negation.
9899         // Effectively, the inverse interpretation of the carry flag already
9900         // accounts for part of the negation.
9901         RHS = DAG.getConstant(~imm, DL, MVT::i32);
9902 
9903         unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
9904                                                            : ARMISD::ADDE;
9905         return DAG.getNode(Opcode, DL, N->getVTList(),
9906                            N->getOperand(0), RHS, N->getOperand(2));
9907       }
9908     }
9909   }
9910   return SDValue();
9911 }
9912 
9913 /// PerformADDECombine - Target-specific dag combine transform from
9914 /// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or
9915 /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL
9916 static SDValue PerformADDECombine(SDNode *N,
9917                                   TargetLowering::DAGCombinerInfo &DCI,
9918                                   const ARMSubtarget *Subtarget) {
9919   // Only ARM and Thumb2 support UMLAL/SMLAL.
9920   if (Subtarget->isThumb1Only())
9921     return PerformAddeSubeCombine(N, DCI.DAG, Subtarget);
9922 
9923   // Only perform the checks after legalize when the pattern is available.
9924   if (DCI.isBeforeLegalize()) return SDValue();
9925 
9926   return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
9927 }
9928 
9929 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
9930 /// operands N0 and N1.  This is a helper for PerformADDCombine that is
9931 /// called with the default operands, and if that fails, with commuted
9932 /// operands.
9933 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
9934                                           TargetLowering::DAGCombinerInfo &DCI,
9935                                           const ARMSubtarget *Subtarget){
9936   // Attempt to create vpadd for this add.
9937   if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
9938     return Result;
9939 
9940   // Attempt to create vpaddl for this add.
9941   if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
9942     return Result;
9943   if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
9944                                                       Subtarget))
9945     return Result;
9946 
9947   // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
9948   if (N0.getNode()->hasOneUse())
9949     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
9950       return Result;
9951   return SDValue();
9952 }
9953 
9954 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
9955 ///
9956 static SDValue PerformADDCombine(SDNode *N,
9957                                  TargetLowering::DAGCombinerInfo &DCI,
9958                                  const ARMSubtarget *Subtarget) {
9959   SDValue N0 = N->getOperand(0);
9960   SDValue N1 = N->getOperand(1);
9961 
9962   // First try with the default operand order.
9963   if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
9964     return Result;
9965 
9966   // If that didn't work, try again with the operands commuted.
9967   return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
9968 }
9969 
9970 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
9971 ///
9972 static SDValue PerformSUBCombine(SDNode *N,
9973                                  TargetLowering::DAGCombinerInfo &DCI) {
9974   SDValue N0 = N->getOperand(0);
9975   SDValue N1 = N->getOperand(1);
9976 
9977   // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
9978   if (N1.getNode()->hasOneUse())
9979     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
9980       return Result;
9981 
9982   return SDValue();
9983 }
9984 
9985 /// PerformVMULCombine
9986 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
9987 /// special multiplier accumulator forwarding.
9988 ///   vmul d3, d0, d2
9989 ///   vmla d3, d1, d2
9990 /// is faster than
9991 ///   vadd d3, d0, d1
9992 ///   vmul d3, d3, d2
9993 //  However, for (A + B) * (A + B),
9994 //    vadd d2, d0, d1
9995 //    vmul d3, d0, d2
9996 //    vmla d3, d1, d2
9997 //  is slower than
9998 //    vadd d2, d0, d1
9999 //    vmul d3, d2, d2
10000 static SDValue PerformVMULCombine(SDNode *N,
10001                                   TargetLowering::DAGCombinerInfo &DCI,
10002                                   const ARMSubtarget *Subtarget) {
10003   if (!Subtarget->hasVMLxForwarding())
10004     return SDValue();
10005 
10006   SelectionDAG &DAG = DCI.DAG;
10007   SDValue N0 = N->getOperand(0);
10008   SDValue N1 = N->getOperand(1);
10009   unsigned Opcode = N0.getOpcode();
10010   if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
10011       Opcode != ISD::FADD && Opcode != ISD::FSUB) {
10012     Opcode = N1.getOpcode();
10013     if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
10014         Opcode != ISD::FADD && Opcode != ISD::FSUB)
10015       return SDValue();
10016     std::swap(N0, N1);
10017   }
10018 
10019   if (N0 == N1)
10020     return SDValue();
10021 
10022   EVT VT = N->getValueType(0);
10023   SDLoc DL(N);
10024   SDValue N00 = N0->getOperand(0);
10025   SDValue N01 = N0->getOperand(1);
10026   return DAG.getNode(Opcode, DL, VT,
10027                      DAG.getNode(ISD::MUL, DL, VT, N00, N1),
10028                      DAG.getNode(ISD::MUL, DL, VT, N01, N1));
10029 }
10030 
10031 static SDValue PerformMULCombine(SDNode *N,
10032                                  TargetLowering::DAGCombinerInfo &DCI,
10033                                  const ARMSubtarget *Subtarget) {
10034   SelectionDAG &DAG = DCI.DAG;
10035 
10036   if (Subtarget->isThumb1Only())
10037     return SDValue();
10038 
10039   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
10040     return SDValue();
10041 
10042   EVT VT = N->getValueType(0);
10043   if (VT.is64BitVector() || VT.is128BitVector())
10044     return PerformVMULCombine(N, DCI, Subtarget);
10045   if (VT != MVT::i32)
10046     return SDValue();
10047 
10048   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
10049   if (!C)
10050     return SDValue();
10051 
10052   int64_t MulAmt = C->getSExtValue();
10053   unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
10054 
10055   ShiftAmt = ShiftAmt & (32 - 1);
10056   SDValue V = N->getOperand(0);
10057   SDLoc DL(N);
10058 
10059   SDValue Res;
10060   MulAmt >>= ShiftAmt;
10061 
10062   if (MulAmt >= 0) {
10063     if (isPowerOf2_32(MulAmt - 1)) {
10064       // (mul x, 2^N + 1) => (add (shl x, N), x)
10065       Res = DAG.getNode(ISD::ADD, DL, VT,
10066                         V,
10067                         DAG.getNode(ISD::SHL, DL, VT,
10068                                     V,
10069                                     DAG.getConstant(Log2_32(MulAmt - 1), DL,
10070                                                     MVT::i32)));
10071     } else if (isPowerOf2_32(MulAmt + 1)) {
10072       // (mul x, 2^N - 1) => (sub (shl x, N), x)
10073       Res = DAG.getNode(ISD::SUB, DL, VT,
10074                         DAG.getNode(ISD::SHL, DL, VT,
10075                                     V,
10076                                     DAG.getConstant(Log2_32(MulAmt + 1), DL,
10077                                                     MVT::i32)),
10078                         V);
10079     } else
10080       return SDValue();
10081   } else {
10082     uint64_t MulAmtAbs = -MulAmt;
10083     if (isPowerOf2_32(MulAmtAbs + 1)) {
10084       // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
10085       Res = DAG.getNode(ISD::SUB, DL, VT,
10086                         V,
10087                         DAG.getNode(ISD::SHL, DL, VT,
10088                                     V,
10089                                     DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
10090                                                     MVT::i32)));
10091     } else if (isPowerOf2_32(MulAmtAbs - 1)) {
10092       // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
10093       Res = DAG.getNode(ISD::ADD, DL, VT,
10094                         V,
10095                         DAG.getNode(ISD::SHL, DL, VT,
10096                                     V,
10097                                     DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
10098                                                     MVT::i32)));
10099       Res = DAG.getNode(ISD::SUB, DL, VT,
10100                         DAG.getConstant(0, DL, MVT::i32), Res);
10101 
10102     } else
10103       return SDValue();
10104   }
10105 
10106   if (ShiftAmt != 0)
10107     Res = DAG.getNode(ISD::SHL, DL, VT,
10108                       Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));
10109 
10110   // Do not add new nodes to DAG combiner worklist.
10111   DCI.CombineTo(N, Res, false);
10112   return SDValue();
10113 }
10114 
10115 static SDValue PerformANDCombine(SDNode *N,
10116                                  TargetLowering::DAGCombinerInfo &DCI,
10117                                  const ARMSubtarget *Subtarget) {
10118   // Attempt to use immediate-form VBIC
10119   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
10120   SDLoc dl(N);
10121   EVT VT = N->getValueType(0);
10122   SelectionDAG &DAG = DCI.DAG;
10123 
10124   if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
10125     return SDValue();
10126 
10127   APInt SplatBits, SplatUndef;
10128   unsigned SplatBitSize;
10129   bool HasAnyUndefs;
10130   if (BVN &&
10131       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
10132     if (SplatBitSize <= 64) {
10133       EVT VbicVT;
10134       SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
10135                                       SplatUndef.getZExtValue(), SplatBitSize,
10136                                       DAG, dl, VbicVT, VT.is128BitVector(),
10137                                       OtherModImm);
10138       if (Val.getNode()) {
10139         SDValue Input =
10140           DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
10141         SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
10142         return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
10143       }
10144     }
10145   }
10146 
10147   if (!Subtarget->isThumb1Only()) {
10148     // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
10149     if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
10150       return Result;
10151   }
10152 
10153   return SDValue();
10154 }
10155 
10156 // Try combining OR nodes to SMULWB, SMULWT.
10157 static SDValue PerformORCombineToSMULWBT(SDNode *OR,
10158                                          TargetLowering::DAGCombinerInfo &DCI,
10159                                          const ARMSubtarget *Subtarget) {
10160   if (!Subtarget->hasV6Ops() ||
10161       (Subtarget->isThumb() &&
10162        (!Subtarget->hasThumb2() || !Subtarget->hasDSP())))
10163     return SDValue();
10164 
10165   SDValue SRL = OR->getOperand(0);
10166   SDValue SHL = OR->getOperand(1);
10167 
10168   if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) {
10169     SRL = OR->getOperand(1);
10170     SHL = OR->getOperand(0);
10171   }
10172   if (!isSRL16(SRL) || !isSHL16(SHL))
10173     return SDValue();
10174 
10175   // The first operands to the shifts need to be the two results from the
10176   // same smul_lohi node.
10177   if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
10178        SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
10179     return SDValue();
10180 
10181   SDNode *SMULLOHI = SRL.getOperand(0).getNode();
10182   if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
10183       SHL.getOperand(0) != SDValue(SMULLOHI, 1))
10184     return SDValue();
10185 
10186   // Now we have:
10187   // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16)))
10188   // For SMUL[B|T] smul_lohi will take a 32-bit and a 16-bit arguments.
10189   // For SMUWB the 16-bit value will signed extended somehow.
10190   // For SMULWT only the SRA is required.
10191   // Check both sides of SMUL_LOHI
10192   SDValue OpS16 = SMULLOHI->getOperand(0);
10193   SDValue OpS32 = SMULLOHI->getOperand(1);
10194 
10195   SelectionDAG &DAG = DCI.DAG;
10196   if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
10197     OpS16 = OpS32;
10198     OpS32 = SMULLOHI->getOperand(0);
10199   }
10200 
10201   SDLoc dl(OR);
10202   unsigned Opcode = 0;
10203   if (isS16(OpS16, DAG))
10204     Opcode = ARMISD::SMULWB;
10205   else if (isSRA16(OpS16)) {
10206     Opcode = ARMISD::SMULWT;
10207     OpS16 = OpS16->getOperand(0);
10208   }
10209   else
10210     return SDValue();
10211 
10212   SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
10213   DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
10214   return SDValue(OR, 0);
10215 }
10216 
10217 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR
10218 static SDValue PerformORCombine(SDNode *N,
10219                                 TargetLowering::DAGCombinerInfo &DCI,
10220                                 const ARMSubtarget *Subtarget) {
10221   // Attempt to use immediate-form VORR
10222   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
10223   SDLoc dl(N);
10224   EVT VT = N->getValueType(0);
10225   SelectionDAG &DAG = DCI.DAG;
10226 
10227   if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
10228     return SDValue();
10229 
10230   APInt SplatBits, SplatUndef;
10231   unsigned SplatBitSize;
10232   bool HasAnyUndefs;
10233   if (BVN && Subtarget->hasNEON() &&
10234       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
10235     if (SplatBitSize <= 64) {
10236       EVT VorrVT;
10237       SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
10238                                       SplatUndef.getZExtValue(), SplatBitSize,
10239                                       DAG, dl, VorrVT, VT.is128BitVector(),
10240                                       OtherModImm);
10241       if (Val.getNode()) {
10242         SDValue Input =
10243           DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
10244         SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
10245         return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
10246       }
10247     }
10248   }
10249 
10250   if (!Subtarget->isThumb1Only()) {
10251     // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
10252     if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
10253       return Result;
10254     if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget))
10255       return Result;
10256   }
10257 
10258   // The code below optimizes (or (and X, Y), Z).
10259   // The AND operand needs to have a single user to make these optimizations
10260   // profitable.
10261   SDValue N0 = N->getOperand(0);
10262   if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
10263     return SDValue();
10264   SDValue N1 = N->getOperand(1);
10265 
10266   // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
10267   if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
10268       DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
10269     APInt SplatUndef;
10270     unsigned SplatBitSize;
10271     bool HasAnyUndefs;
10272 
10273     APInt SplatBits0, SplatBits1;
10274     BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
10275     BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
10276     // Ensure that the second operand of both ands are constants
10277     if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
10278                                       HasAnyUndefs) && !HasAnyUndefs) {
10279         if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
10280                                           HasAnyUndefs) && !HasAnyUndefs) {
10281             // Ensure that the bit width of the constants are the same and that
10282             // the splat arguments are logical inverses as per the pattern we
10283             // are trying to simplify.
10284             if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
10285                 SplatBits0 == ~SplatBits1) {
10286                 // Canonicalize the vector type to make instruction selection
10287                 // simpler.
10288                 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
10289                 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
10290                                              N0->getOperand(1),
10291                                              N0->getOperand(0),
10292                                              N1->getOperand(0));
10293                 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
10294             }
10295         }
10296     }
10297   }
10298 
10299   // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
10300   // reasonable.
10301 
10302   // BFI is only available on V6T2+
10303   if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
10304     return SDValue();
10305 
10306   SDLoc DL(N);
10307   // 1) or (and A, mask), val => ARMbfi A, val, mask
10308   //      iff (val & mask) == val
10309   //
10310   // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
10311   //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
10312   //          && mask == ~mask2
10313   //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
10314   //          && ~mask == mask2
10315   //  (i.e., copy a bitfield value into another bitfield of the same width)
10316 
10317   if (VT != MVT::i32)
10318     return SDValue();
10319 
10320   SDValue N00 = N0.getOperand(0);
10321 
10322   // The value and the mask need to be constants so we can verify this is
10323   // actually a bitfield set. If the mask is 0xffff, we can do better
10324   // via a movt instruction, so don't use BFI in that case.
10325   SDValue MaskOp = N0.getOperand(1);
10326   ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
10327   if (!MaskC)
10328     return SDValue();
10329   unsigned Mask = MaskC->getZExtValue();
10330   if (Mask == 0xffff)
10331     return SDValue();
10332   SDValue Res;
10333   // Case (1): or (and A, mask), val => ARMbfi A, val, mask
10334   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
10335   if (N1C) {
10336     unsigned Val = N1C->getZExtValue();
10337     if ((Val & ~Mask) != Val)
10338       return SDValue();
10339 
10340     if (ARM::isBitFieldInvertedMask(Mask)) {
10341       Val >>= countTrailingZeros(~Mask);
10342 
10343       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
10344                         DAG.getConstant(Val, DL, MVT::i32),
10345                         DAG.getConstant(Mask, DL, MVT::i32));
10346 
10347       // Do not add new nodes to DAG combiner worklist.
10348       DCI.CombineTo(N, Res, false);
10349       return SDValue();
10350     }
10351   } else if (N1.getOpcode() == ISD::AND) {
10352     // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
10353     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
10354     if (!N11C)
10355       return SDValue();
10356     unsigned Mask2 = N11C->getZExtValue();
10357 
10358     // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
10359     // as is to match.
10360     if (ARM::isBitFieldInvertedMask(Mask) &&
10361         (Mask == ~Mask2)) {
10362       // The pack halfword instruction works better for masks that fit it,
10363       // so use that when it's available.
10364       if (Subtarget->hasDSP() &&
10365           (Mask == 0xffff || Mask == 0xffff0000))
10366         return SDValue();
10367       // 2a
10368       unsigned amt = countTrailingZeros(Mask2);
10369       Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
10370                         DAG.getConstant(amt, DL, MVT::i32));
10371       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
10372                         DAG.getConstant(Mask, DL, MVT::i32));
10373       // Do not add new nodes to DAG combiner worklist.
10374       DCI.CombineTo(N, Res, false);
10375       return SDValue();
10376     } else if (ARM::isBitFieldInvertedMask(~Mask) &&
10377                (~Mask == Mask2)) {
10378       // The pack halfword instruction works better for masks that fit it,
10379       // so use that when it's available.
10380       if (Subtarget->hasDSP() &&
10381           (Mask2 == 0xffff || Mask2 == 0xffff0000))
10382         return SDValue();
10383       // 2b
10384       unsigned lsb = countTrailingZeros(Mask);
10385       Res = DAG.getNode(ISD::SRL, DL, VT, N00,
10386                         DAG.getConstant(lsb, DL, MVT::i32));
10387       Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
10388                         DAG.getConstant(Mask2, DL, MVT::i32));
10389       // Do not add new nodes to DAG combiner worklist.
10390       DCI.CombineTo(N, Res, false);
10391       return SDValue();
10392     }
10393   }
10394 
10395   if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
10396       N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
10397       ARM::isBitFieldInvertedMask(~Mask)) {
10398     // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
10399     // where lsb(mask) == #shamt and masked bits of B are known zero.
10400     SDValue ShAmt = N00.getOperand(1);
10401     unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
10402     unsigned LSB = countTrailingZeros(Mask);
10403     if (ShAmtC != LSB)
10404       return SDValue();
10405 
10406     Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
10407                       DAG.getConstant(~Mask, DL, MVT::i32));
10408 
10409     // Do not add new nodes to DAG combiner worklist.
10410     DCI.CombineTo(N, Res, false);
10411   }
10412 
10413   return SDValue();
10414 }
10415 
10416 static SDValue PerformXORCombine(SDNode *N,
10417                                  TargetLowering::DAGCombinerInfo &DCI,
10418                                  const ARMSubtarget *Subtarget) {
10419   EVT VT = N->getValueType(0);
10420   SelectionDAG &DAG = DCI.DAG;
10421 
10422   if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
10423     return SDValue();
10424 
10425   if (!Subtarget->isThumb1Only()) {
10426     // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
10427     if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
10428       return Result;
10429   }
10430 
10431   return SDValue();
10432 }
10433 
10434 // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it,
10435 // and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and
10436 // their position in "to" (Rd).
10437 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
10438   assert(N->getOpcode() == ARMISD::BFI);
10439 
10440   SDValue From = N->getOperand(1);
10441   ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
10442   FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());
10443 
10444   // If the Base came from a SHR #C, we can deduce that it is really testing bit
10445   // #C in the base of the SHR.
10446   if (From->getOpcode() == ISD::SRL &&
10447       isa<ConstantSDNode>(From->getOperand(1))) {
10448     APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
10449     assert(Shift.getLimitedValue() < 32 && "Shift too large!");
10450     FromMask <<= Shift.getLimitedValue(31);
10451     From = From->getOperand(0);
10452   }
10453 
10454   return From;
10455 }
10456 
10457 // If A and B contain one contiguous set of bits, does A | B == A . B?
10458 //
10459 // Neither A nor B must be zero.
10460 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
10461   unsigned LastActiveBitInA =  A.countTrailingZeros();
10462   unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
10463   return LastActiveBitInA - 1 == FirstActiveBitInB;
10464 }
10465 
10466 static SDValue FindBFIToCombineWith(SDNode *N) {
10467   // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can combine with,
10468   // if one exists.
10469   APInt ToMask, FromMask;
10470   SDValue From = ParseBFI(N, ToMask, FromMask);
10471   SDValue To = N->getOperand(0);
10472 
10473   // Now check for a compatible BFI to merge with. We can pass through BFIs that
10474   // aren't compatible, but not if they set the same bit in their destination as
10475   // we do (or that of any BFI we're going to combine with).
10476   SDValue V = To;
10477   APInt CombinedToMask = ToMask;
10478   while (V.getOpcode() == ARMISD::BFI) {
10479     APInt NewToMask, NewFromMask;
10480     SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
10481     if (NewFrom != From) {
10482       // This BFI has a different base. Keep going.
10483       CombinedToMask |= NewToMask;
10484       V = V.getOperand(0);
10485       continue;
10486     }
10487 
10488     // Do the written bits conflict with any we've seen so far?
10489     if ((NewToMask & CombinedToMask).getBoolValue())
10490       // Conflicting bits - bail out because going further is unsafe.
10491       return SDValue();
10492 
10493     // Are the new bits contiguous when combined with the old bits?
10494     if (BitsProperlyConcatenate(ToMask, NewToMask) &&
10495         BitsProperlyConcatenate(FromMask, NewFromMask))
10496       return V;
10497     if (BitsProperlyConcatenate(NewToMask, ToMask) &&
10498         BitsProperlyConcatenate(NewFromMask, FromMask))
10499       return V;
10500 
10501     // We've seen a write to some bits, so track it.
10502     CombinedToMask |= NewToMask;
10503     // Keep going...
10504     V = V.getOperand(0);
10505   }
10506 
10507   return SDValue();
10508 }
10509 
10510 static SDValue PerformBFICombine(SDNode *N,
10511                                  TargetLowering::DAGCombinerInfo &DCI) {
10512   SDValue N1 = N->getOperand(1);
10513   if (N1.getOpcode() == ISD::AND) {
10514     // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
10515     // the bits being cleared by the AND are not demanded by the BFI.
10516     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
10517     if (!N11C)
10518       return SDValue();
10519     unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
10520     unsigned LSB = countTrailingZeros(~InvMask);
10521     unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
10522     assert(Width <
10523                static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
10524            "undefined behavior");
10525     unsigned Mask = (1u << Width) - 1;
10526     unsigned Mask2 = N11C->getZExtValue();
10527     if ((Mask & (~Mask2)) == 0)
10528       return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
10529                              N->getOperand(0), N1.getOperand(0),
10530                              N->getOperand(2));
10531   } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
10532     // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
10533     // Keep track of any consecutive bits set that all come from the same base
10534     // value. We can combine these together into a single BFI.
10535     SDValue CombineBFI = FindBFIToCombineWith(N);
10536     if (CombineBFI == SDValue())
10537       return SDValue();
10538 
10539     // We've found a BFI.
10540     APInt ToMask1, FromMask1;
10541     SDValue From1 = ParseBFI(N, ToMask1, FromMask1);
10542 
10543     APInt ToMask2, FromMask2;
10544     SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
10545     assert(From1 == From2);
10546     (void)From2;
10547 
10548     // First, unlink CombineBFI.
10549     DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
10550     // Then create a new BFI, combining the two together.
10551     APInt NewFromMask = FromMask1 | FromMask2;
10552     APInt NewToMask = ToMask1 | ToMask2;
10553 
10554     EVT VT = N->getValueType(0);
10555     SDLoc dl(N);
10556 
10557     if (NewFromMask[0] == 0)
10558       From1 = DCI.DAG.getNode(
10559         ISD::SRL, dl, VT, From1,
10560         DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
10561     return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
10562                            DCI.DAG.getConstant(~NewToMask, dl, VT));
10563   }
10564   return SDValue();
10565 }
10566 
10567 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for
10568 /// ARMISD::VMOVRRD.
10569 static SDValue PerformVMOVRRDCombine(SDNode *N,
10570                                      TargetLowering::DAGCombinerInfo &DCI,
10571                                      const ARMSubtarget *Subtarget) {
10572   // vmovrrd(vmovdrr x, y) -> x,y
10573   SDValue InDouble = N->getOperand(0);
10574   if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP())
10575     return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
10576 
10577   // vmovrrd(load f64) -> (load i32), (load i32)
10578   SDNode *InNode = InDouble.getNode();
10579   if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
10580       InNode->getValueType(0) == MVT::f64 &&
10581       InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
10582       !cast<LoadSDNode>(InNode)->isVolatile()) {
10583     // TODO: Should this be done for non-FrameIndex operands?
10584     LoadSDNode *LD = cast<LoadSDNode>(InNode);
10585 
10586     SelectionDAG &DAG = DCI.DAG;
10587     SDLoc DL(LD);
10588     SDValue BasePtr = LD->getBasePtr();
10589     SDValue NewLD1 =
10590         DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
10591                     LD->getAlignment(), LD->getMemOperand()->getFlags());
10592 
10593     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
10594                                     DAG.getConstant(4, DL, MVT::i32));
10595     SDValue NewLD2 = DAG.getLoad(
10596         MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, LD->getPointerInfo(),
10597         std::min(4U, LD->getAlignment() / 2), LD->getMemOperand()->getFlags());
10598 
10599     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
10600     if (DCI.DAG.getDataLayout().isBigEndian())
10601       std::swap (NewLD1, NewLD2);
10602     SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
10603     return Result;
10604   }
10605 
10606   return SDValue();
10607 }
10608 
10609 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for
10610 /// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
10611 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
10612   // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
10613   SDValue Op0 = N->getOperand(0);
10614   SDValue Op1 = N->getOperand(1);
10615   if (Op0.getOpcode() == ISD::BITCAST)
10616     Op0 = Op0.getOperand(0);
10617   if (Op1.getOpcode() == ISD::BITCAST)
10618     Op1 = Op1.getOperand(0);
10619   if (Op0.getOpcode() == ARMISD::VMOVRRD &&
10620       Op0.getNode() == Op1.getNode() &&
10621       Op0.getResNo() == 0 && Op1.getResNo() == 1)
10622     return DAG.getNode(ISD::BITCAST, SDLoc(N),
10623                        N->getValueType(0), Op0.getOperand(0));
10624   return SDValue();
10625 }
10626 
10627 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
10628 /// are normal, non-volatile loads.  If so, it is profitable to bitcast an
10629 /// i64 vector to have f64 elements, since the value can then be loaded
10630 /// directly into a VFP register.
10631 static bool hasNormalLoadOperand(SDNode *N) {
10632   unsigned NumElts = N->getValueType(0).getVectorNumElements();
10633   for (unsigned i = 0; i < NumElts; ++i) {
10634     SDNode *Elt = N->getOperand(i).getNode();
10635     if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
10636       return true;
10637   }
10638   return false;
10639 }
10640 
10641 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
10642 /// ISD::BUILD_VECTOR.
10643 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
10644                                           TargetLowering::DAGCombinerInfo &DCI,
10645                                           const ARMSubtarget *Subtarget) {
10646   // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
10647   // VMOVRRD is introduced when legalizing i64 types.  It forces the i64 value
10648   // into a pair of GPRs, which is fine when the value is used as a scalar,
10649   // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
10650   SelectionDAG &DAG = DCI.DAG;
10651   if (N->getNumOperands() == 2)
10652     if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
10653       return RV;
10654 
10655   // Load i64 elements as f64 values so that type legalization does not split
10656   // them up into i32 values.
10657   EVT VT = N->getValueType(0);
10658   if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
10659     return SDValue();
10660   SDLoc dl(N);
10661   SmallVector<SDValue, 8> Ops;
10662   unsigned NumElts = VT.getVectorNumElements();
10663   for (unsigned i = 0; i < NumElts; ++i) {
10664     SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
10665     Ops.push_back(V);
10666     // Make the DAGCombiner fold the bitcast.
10667     DCI.AddToWorklist(V.getNode());
10668   }
10669   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
10670   SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
10671   return DAG.getNode(ISD::BITCAST, dl, VT, BV);
10672 }
10673 
10674 /// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
10675 static SDValue
10676 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
10677   // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
10678   // At that time, we may have inserted bitcasts from integer to float.
10679   // If these bitcasts have survived DAGCombine, change the lowering of this
10680   // BUILD_VECTOR in something more vector friendly, i.e., that does not
10681   // force to use floating point types.
10682 
10683   // Make sure we can change the type of the vector.
10684   // This is possible iff:
10685   // 1. The vector is only used in a bitcast to a integer type. I.e.,
10686   //    1.1. Vector is used only once.
10687   //    1.2. Use is a bit convert to an integer type.
10688   // 2. The size of its operands are 32-bits (64-bits are not legal).
10689   EVT VT = N->getValueType(0);
10690   EVT EltVT = VT.getVectorElementType();
10691 
10692   // Check 1.1. and 2.
10693   if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
10694     return SDValue();
10695 
10696   // By construction, the input type must be float.
10697   assert(EltVT == MVT::f32 && "Unexpected type!");
10698 
10699   // Check 1.2.
10700   SDNode *Use = *N->use_begin();
10701   if (Use->getOpcode() != ISD::BITCAST ||
10702       Use->getValueType(0).isFloatingPoint())
10703     return SDValue();
10704 
10705   // Check profitability.
10706   // Model is, if more than half of the relevant operands are bitcast from
10707   // i32, turn the build_vector into a sequence of insert_vector_elt.
10708   // Relevant operands are everything that is not statically
10709   // (i.e., at compile time) bitcasted.
10710   unsigned NumOfBitCastedElts = 0;
10711   unsigned NumElts = VT.getVectorNumElements();
10712   unsigned NumOfRelevantElts = NumElts;
10713   for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
10714     SDValue Elt = N->getOperand(Idx);
10715     if (Elt->getOpcode() == ISD::BITCAST) {
10716       // Assume only bit cast to i32 will go away.
10717       if (Elt->getOperand(0).getValueType() == MVT::i32)
10718         ++NumOfBitCastedElts;
10719     } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
10720       // Constants are statically casted, thus do not count them as
10721       // relevant operands.
10722       --NumOfRelevantElts;
10723   }
10724 
10725   // Check if more than half of the elements require a non-free bitcast.
10726   if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
10727     return SDValue();
10728 
10729   SelectionDAG &DAG = DCI.DAG;
10730   // Create the new vector type.
10731   EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
10732   // Check if the type is legal.
10733   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10734   if (!TLI.isTypeLegal(VecVT))
10735     return SDValue();
10736 
10737   // Combine:
10738   // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
10739   // => BITCAST INSERT_VECTOR_ELT
10740   //                      (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
10741   //                      (BITCAST EN), N.
10742   SDValue Vec = DAG.getUNDEF(VecVT);
10743   SDLoc dl(N);
10744   for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
10745     SDValue V = N->getOperand(Idx);
10746     if (V.isUndef())
10747       continue;
10748     if (V.getOpcode() == ISD::BITCAST &&
10749         V->getOperand(0).getValueType() == MVT::i32)
10750       // Fold obvious case.
10751       V = V.getOperand(0);
10752     else {
10753       V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
10754       // Make the DAGCombiner fold the bitcasts.
10755       DCI.AddToWorklist(V.getNode());
10756     }
10757     SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
10758     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
10759   }
10760   Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
10761   // Make the DAGCombiner fold the bitcasts.
10762   DCI.AddToWorklist(Vec.getNode());
10763   return Vec;
10764 }
10765 
10766 /// PerformInsertEltCombine - Target-specific dag combine xforms for
10767 /// ISD::INSERT_VECTOR_ELT.
10768 static SDValue PerformInsertEltCombine(SDNode *N,
10769                                        TargetLowering::DAGCombinerInfo &DCI) {
10770   // Bitcast an i64 load inserted into a vector to f64.
10771   // Otherwise, the i64 value will be legalized to a pair of i32 values.
10772   EVT VT = N->getValueType(0);
10773   SDNode *Elt = N->getOperand(1).getNode();
10774   if (VT.getVectorElementType() != MVT::i64 ||
10775       !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
10776     return SDValue();
10777 
10778   SelectionDAG &DAG = DCI.DAG;
10779   SDLoc dl(N);
10780   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
10781                                  VT.getVectorNumElements());
10782   SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
10783   SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
10784   // Make the DAGCombiner fold the bitcasts.
10785   DCI.AddToWorklist(Vec.getNode());
10786   DCI.AddToWorklist(V.getNode());
10787   SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
10788                                Vec, V, N->getOperand(2));
10789   return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
10790 }
10791 
10792 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
10793 /// ISD::VECTOR_SHUFFLE.
10794 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
10795   // The LLVM shufflevector instruction does not require the shuffle mask
10796   // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
10797   // have that requirement.  When translating to ISD::VECTOR_SHUFFLE, if the
10798   // operands do not match the mask length, they are extended by concatenating
10799   // them with undef vectors.  That is probably the right thing for other
10800   // targets, but for NEON it is better to concatenate two double-register
10801   // size vector operands into a single quad-register size vector.  Do that
10802   // transformation here:
10803   //   shuffle(concat(v1, undef), concat(v2, undef)) ->
10804   //   shuffle(concat(v1, v2), undef)
10805   SDValue Op0 = N->getOperand(0);
10806   SDValue Op1 = N->getOperand(1);
10807   if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
10808       Op1.getOpcode() != ISD::CONCAT_VECTORS ||
10809       Op0.getNumOperands() != 2 ||
10810       Op1.getNumOperands() != 2)
10811     return SDValue();
10812   SDValue Concat0Op1 = Op0.getOperand(1);
10813   SDValue Concat1Op1 = Op1.getOperand(1);
10814   if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
10815     return SDValue();
10816   // Skip the transformation if any of the types are illegal.
10817   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10818   EVT VT = N->getValueType(0);
10819   if (!TLI.isTypeLegal(VT) ||
10820       !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
10821       !TLI.isTypeLegal(Concat1Op1.getValueType()))
10822     return SDValue();
10823 
10824   SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
10825                                   Op0.getOperand(0), Op1.getOperand(0));
10826   // Translate the shuffle mask.
10827   SmallVector<int, 16> NewMask;
10828   unsigned NumElts = VT.getVectorNumElements();
10829   unsigned HalfElts = NumElts/2;
10830   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
10831   for (unsigned n = 0; n < NumElts; ++n) {
10832     int MaskElt = SVN->getMaskElt(n);
10833     int NewElt = -1;
10834     if (MaskElt < (int)HalfElts)
10835       NewElt = MaskElt;
10836     else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
10837       NewElt = HalfElts + MaskElt - NumElts;
10838     NewMask.push_back(NewElt);
10839   }
10840   return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
10841                               DAG.getUNDEF(VT), NewMask);
10842 }
10843 
10844 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
10845 /// NEON load/store intrinsics, and generic vector load/stores, to merge
10846 /// base address updates.
10847 /// For generic load/stores, the memory type is assumed to be a vector.
10848 /// The caller is assumed to have checked legality.
10849 static SDValue CombineBaseUpdate(SDNode *N,
10850                                  TargetLowering::DAGCombinerInfo &DCI) {
10851   SelectionDAG &DAG = DCI.DAG;
10852   const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
10853                             N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
10854   const bool isStore = N->getOpcode() == ISD::STORE;
10855   const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
10856   SDValue Addr = N->getOperand(AddrOpIdx);
10857   MemSDNode *MemN = cast<MemSDNode>(N);
10858   SDLoc dl(N);
10859 
10860   // Search for a use of the address operand that is an increment.
10861   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
10862          UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
10863     SDNode *User = *UI;
10864     if (User->getOpcode() != ISD::ADD ||
10865         UI.getUse().getResNo() != Addr.getResNo())
10866       continue;
10867 
10868     // Check that the add is independent of the load/store.  Otherwise, folding
10869     // it would create a cycle.
10870     if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
10871       continue;
10872 
10873     // Find the new opcode for the updating load/store.
10874     bool isLoadOp = true;
10875     bool isLaneOp = false;
10876     unsigned NewOpc = 0;
10877     unsigned NumVecs = 0;
10878     if (isIntrinsic) {
10879       unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
10880       switch (IntNo) {
10881       default: llvm_unreachable("unexpected intrinsic for Neon base update");
10882       case Intrinsic::arm_neon_vld1:     NewOpc = ARMISD::VLD1_UPD;
10883         NumVecs = 1; break;
10884       case Intrinsic::arm_neon_vld2:     NewOpc = ARMISD::VLD2_UPD;
10885         NumVecs = 2; break;
10886       case Intrinsic::arm_neon_vld3:     NewOpc = ARMISD::VLD3_UPD;
10887         NumVecs = 3; break;
10888       case Intrinsic::arm_neon_vld4:     NewOpc = ARMISD::VLD4_UPD;
10889         NumVecs = 4; break;
10890       case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
10891         NumVecs = 2; isLaneOp = true; break;
10892       case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
10893         NumVecs = 3; isLaneOp = true; break;
10894       case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
10895         NumVecs = 4; isLaneOp = true; break;
10896       case Intrinsic::arm_neon_vst1:     NewOpc = ARMISD::VST1_UPD;
10897         NumVecs = 1; isLoadOp = false; break;
10898       case Intrinsic::arm_neon_vst2:     NewOpc = ARMISD::VST2_UPD;
10899         NumVecs = 2; isLoadOp = false; break;
10900       case Intrinsic::arm_neon_vst3:     NewOpc = ARMISD::VST3_UPD;
10901         NumVecs = 3; isLoadOp = false; break;
10902       case Intrinsic::arm_neon_vst4:     NewOpc = ARMISD::VST4_UPD;
10903         NumVecs = 4; isLoadOp = false; break;
10904       case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
10905         NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
10906       case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
10907         NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
10908       case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
10909         NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
10910       }
10911     } else {
10912       isLaneOp = true;
10913       switch (N->getOpcode()) {
10914       default: llvm_unreachable("unexpected opcode for Neon base update");
10915       case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break;
10916       case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
10917       case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
10918       case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
10919       case ISD::LOAD:       NewOpc = ARMISD::VLD1_UPD;
10920         NumVecs = 1; isLaneOp = false; break;
10921       case ISD::STORE:      NewOpc = ARMISD::VST1_UPD;
10922         NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
10923       }
10924     }
10925 
10926     // Find the size of memory referenced by the load/store.
10927     EVT VecTy;
10928     if (isLoadOp) {
10929       VecTy = N->getValueType(0);
10930     } else if (isIntrinsic) {
10931       VecTy = N->getOperand(AddrOpIdx+1).getValueType();
10932     } else {
10933       assert(isStore && "Node has to be a load, a store, or an intrinsic!");
10934       VecTy = N->getOperand(1).getValueType();
10935     }
10936 
10937     unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
10938     if (isLaneOp)
10939       NumBytes /= VecTy.getVectorNumElements();
10940 
10941     // If the increment is a constant, it must match the memory ref size.
10942     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
10943     ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
10944     if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) {
10945       // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
10946       // separate instructions that make it harder to use a non-constant update.
10947       continue;
10948     }
10949 
10950     // OK, we found an ADD we can fold into the base update.
10951     // Now, create a _UPD node, taking care of not breaking alignment.
10952 
10953     EVT AlignedVecTy = VecTy;
10954     unsigned Alignment = MemN->getAlignment();
10955 
10956     // If this is a less-than-standard-aligned load/store, change the type to
10957     // match the standard alignment.
10958     // The alignment is overlooked when selecting _UPD variants; and it's
10959     // easier to introduce bitcasts here than fix that.
10960     // There are 3 ways to get to this base-update combine:
10961     // - intrinsics: they are assumed to be properly aligned (to the standard
10962     //   alignment of the memory type), so we don't need to do anything.
10963     // - ARMISD::VLDx nodes: they are only generated from the aforementioned
10964     //   intrinsics, so, likewise, there's nothing to do.
10965     // - generic load/store instructions: the alignment is specified as an
10966     //   explicit operand, rather than implicitly as the standard alignment
10967     //   of the memory type (like the intrisics).  We need to change the
10968     //   memory type to match the explicit alignment.  That way, we don't
10969     //   generate non-standard-aligned ARMISD::VLDx nodes.
10970     if (isa<LSBaseSDNode>(N)) {
10971       if (Alignment == 0)
10972         Alignment = 1;
10973       if (Alignment < VecTy.getScalarSizeInBits() / 8) {
10974         MVT EltTy = MVT::getIntegerVT(Alignment * 8);
10975         assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
10976         assert(!isLaneOp && "Unexpected generic load/store lane.");
10977         unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
10978         AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
10979       }
10980       // Don't set an explicit alignment on regular load/stores that we want
10981       // to transform to VLD/VST 1_UPD nodes.
10982       // This matches the behavior of regular load/stores, which only get an
10983       // explicit alignment if the MMO alignment is larger than the standard
10984       // alignment of the memory type.
10985       // Intrinsics, however, always get an explicit alignment, set to the
10986       // alignment of the MMO.
10987       Alignment = 1;
10988     }
10989 
10990     // Create the new updating load/store node.
10991     // First, create an SDVTList for the new updating node's results.
10992     EVT Tys[6];
10993     unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
10994     unsigned n;
10995     for (n = 0; n < NumResultVecs; ++n)
10996       Tys[n] = AlignedVecTy;
10997     Tys[n++] = MVT::i32;
10998     Tys[n] = MVT::Other;
10999     SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));
11000 
11001     // Then, gather the new node's operands.
11002     SmallVector<SDValue, 8> Ops;
11003     Ops.push_back(N->getOperand(0)); // incoming chain
11004     Ops.push_back(N->getOperand(AddrOpIdx));
11005     Ops.push_back(Inc);
11006 
11007     if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
11008       // Try to match the intrinsic's signature
11009       Ops.push_back(StN->getValue());
11010     } else {
11011       // Loads (and of course intrinsics) match the intrinsics' signature,
11012       // so just add all but the alignment operand.
11013       for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i)
11014         Ops.push_back(N->getOperand(i));
11015     }
11016 
11017     // For all node types, the alignment operand is always the last one.
11018     Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));
11019 
11020     // If this is a non-standard-aligned STORE, the penultimate operand is the
11021     // stored value.  Bitcast it to the aligned type.
11022     if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
11023       SDValue &StVal = Ops[Ops.size()-2];
11024       StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
11025     }
11026 
11027     EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy;
11028     SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT,
11029                                            MemN->getMemOperand());
11030 
11031     // Update the uses.
11032     SmallVector<SDValue, 5> NewResults;
11033     for (unsigned i = 0; i < NumResultVecs; ++i)
11034       NewResults.push_back(SDValue(UpdN.getNode(), i));
11035 
11036     // If this is an non-standard-aligned LOAD, the first result is the loaded
11037     // value.  Bitcast it to the expected result type.
11038     if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
11039       SDValue &LdVal = NewResults[0];
11040       LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
11041     }
11042 
11043     NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
11044     DCI.CombineTo(N, NewResults);
11045     DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
11046 
11047     break;
11048   }
11049   return SDValue();
11050 }
11051 
11052 static SDValue PerformVLDCombine(SDNode *N,
11053                                  TargetLowering::DAGCombinerInfo &DCI) {
11054   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
11055     return SDValue();
11056 
11057   return CombineBaseUpdate(N, DCI);
11058 }
11059 
11060 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
11061 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
11062 /// are also VDUPLANEs.  If so, combine them to a vldN-dup operation and
11063 /// return true.
11064 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
11065   SelectionDAG &DAG = DCI.DAG;
11066   EVT VT = N->getValueType(0);
11067   // vldN-dup instructions only support 64-bit vectors for N > 1.
11068   if (!VT.is64BitVector())
11069     return false;
11070 
11071   // Check if the VDUPLANE operand is a vldN-dup intrinsic.
11072   SDNode *VLD = N->getOperand(0).getNode();
11073   if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
11074     return false;
11075   unsigned NumVecs = 0;
11076   unsigned NewOpc = 0;
11077   unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
11078   if (IntNo == Intrinsic::arm_neon_vld2lane) {
11079     NumVecs = 2;
11080     NewOpc = ARMISD::VLD2DUP;
11081   } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
11082     NumVecs = 3;
11083     NewOpc = ARMISD::VLD3DUP;
11084   } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
11085     NumVecs = 4;
11086     NewOpc = ARMISD::VLD4DUP;
11087   } else {
11088     return false;
11089   }
11090 
11091   // First check that all the vldN-lane uses are VDUPLANEs and that the lane
11092   // numbers match the load.
11093   unsigned VLDLaneNo =
11094     cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
11095   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
11096        UI != UE; ++UI) {
11097     // Ignore uses of the chain result.
11098     if (UI.getUse().getResNo() == NumVecs)
11099       continue;
11100     SDNode *User = *UI;
11101     if (User->getOpcode() != ARMISD::VDUPLANE ||
11102         VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
11103       return false;
11104   }
11105 
11106   // Create the vldN-dup node.
11107   EVT Tys[5];
11108   unsigned n;
11109   for (n = 0; n < NumVecs; ++n)
11110     Tys[n] = VT;
11111   Tys[n] = MVT::Other;
11112   SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1));
11113   SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
11114   MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
11115   SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
11116                                            Ops, VLDMemInt->getMemoryVT(),
11117                                            VLDMemInt->getMemOperand());
11118 
11119   // Update the uses.
11120   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
11121        UI != UE; ++UI) {
11122     unsigned ResNo = UI.getUse().getResNo();
11123     // Ignore uses of the chain result.
11124     if (ResNo == NumVecs)
11125       continue;
11126     SDNode *User = *UI;
11127     DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
11128   }
11129 
11130   // Now the vldN-lane intrinsic is dead except for its chain result.
11131   // Update uses of the chain.
11132   std::vector<SDValue> VLDDupResults;
11133   for (unsigned n = 0; n < NumVecs; ++n)
11134     VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
11135   VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
11136   DCI.CombineTo(VLD, VLDDupResults);
11137 
11138   return true;
11139 }
11140 
11141 /// PerformVDUPLANECombine - Target-specific dag combine xforms for
11142 /// ARMISD::VDUPLANE.
11143 static SDValue PerformVDUPLANECombine(SDNode *N,
11144                                       TargetLowering::DAGCombinerInfo &DCI) {
11145   SDValue Op = N->getOperand(0);
11146 
11147   // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
11148   // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
11149   if (CombineVLDDUP(N, DCI))
11150     return SDValue(N, 0);
11151 
11152   // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
11153   // redundant.  Ignore bit_converts for now; element sizes are checked below.
11154   while (Op.getOpcode() == ISD::BITCAST)
11155     Op = Op.getOperand(0);
11156   if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
11157     return SDValue();
11158 
11159   // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
11160   unsigned EltSize = Op.getScalarValueSizeInBits();
11161   // The canonical VMOV for a zero vector uses a 32-bit element size.
11162   unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
11163   unsigned EltBits;
11164   if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
11165     EltSize = 8;
11166   EVT VT = N->getValueType(0);
11167   if (EltSize > VT.getScalarSizeInBits())
11168     return SDValue();
11169 
11170   return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
11171 }
11172 
11173 /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
11174 static SDValue PerformVDUPCombine(SDNode *N,
11175                                   TargetLowering::DAGCombinerInfo &DCI) {
11176   SelectionDAG &DAG = DCI.DAG;
11177   SDValue Op = N->getOperand(0);
11178 
11179   // Match VDUP(LOAD) -> VLD1DUP.
11180   // We match this pattern here rather than waiting for isel because the
11181   // transform is only legal for unindexed loads.
11182   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
11183   if (LD && Op.hasOneUse() && LD->isUnindexed() &&
11184       LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
11185     SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
11186                       DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) };
11187     SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
11188     SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys,
11189                                              Ops, LD->getMemoryVT(),
11190                                              LD->getMemOperand());
11191     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
11192     return VLDDup;
11193   }
11194 
11195   return SDValue();
11196 }
11197 
11198 static SDValue PerformLOADCombine(SDNode *N,
11199                                   TargetLowering::DAGCombinerInfo &DCI) {
11200   EVT VT = N->getValueType(0);
11201 
11202   // If this is a legal vector load, try to combine it into a VLD1_UPD.
11203   if (ISD::isNormalLoad(N) && VT.isVector() &&
11204       DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
11205     return CombineBaseUpdate(N, DCI);
11206 
11207   return SDValue();
11208 }
11209 
11210 /// PerformSTORECombine - Target-specific dag combine xforms for
11211 /// ISD::STORE.
11212 static SDValue PerformSTORECombine(SDNode *N,
11213                                    TargetLowering::DAGCombinerInfo &DCI) {
11214   StoreSDNode *St = cast<StoreSDNode>(N);
11215   if (St->isVolatile())
11216     return SDValue();
11217 
11218   // Optimize trunc store (of multiple scalars) to shuffle and store.  First,
11219   // pack all of the elements in one place.  Next, store to memory in fewer
11220   // chunks.
11221   SDValue StVal = St->getValue();
11222   EVT VT = StVal.getValueType();
11223   if (St->isTruncatingStore() && VT.isVector()) {
11224     SelectionDAG &DAG = DCI.DAG;
11225     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11226     EVT StVT = St->getMemoryVT();
11227     unsigned NumElems = VT.getVectorNumElements();
11228     assert(StVT != VT && "Cannot truncate to the same type");
11229     unsigned FromEltSz = VT.getScalarSizeInBits();
11230     unsigned ToEltSz = StVT.getScalarSizeInBits();
11231 
11232     // From, To sizes and ElemCount must be pow of two
11233     if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();
11234 
11235     // We are going to use the original vector elt for storing.
11236     // Accumulated smaller vector elements must be a multiple of the store size.
11237     if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue();
11238 
11239     unsigned SizeRatio  = FromEltSz / ToEltSz;
11240     assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
11241 
11242     // Create a type on which we perform the shuffle.
11243     EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
11244                                      NumElems*SizeRatio);
11245     assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
11246 
11247     SDLoc DL(St);
11248     SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
11249     SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
11250     for (unsigned i = 0; i < NumElems; ++i)
11251       ShuffleVec[i] = DAG.getDataLayout().isBigEndian()
11252                           ? (i + 1) * SizeRatio - 1
11253                           : i * SizeRatio;
11254 
11255     // Can't shuffle using an illegal type.
11256     if (!TLI.isTypeLegal(WideVecVT)) return SDValue();
11257 
11258     SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec,
11259                                 DAG.getUNDEF(WideVec.getValueType()),
11260                                 ShuffleVec);
11261     // At this point all of the data is stored at the bottom of the
11262     // register. We now need to save it to mem.
11263 
11264     // Find the largest store unit
11265     MVT StoreType = MVT::i8;
11266     for (MVT Tp : MVT::integer_valuetypes()) {
11267       if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
11268         StoreType = Tp;
11269     }
11270     // Didn't find a legal store type.
11271     if (!TLI.isTypeLegal(StoreType))
11272       return SDValue();
11273 
11274     // Bitcast the original vector into a vector of store-size units
11275     EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
11276             StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
11277     assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
11278     SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
11279     SmallVector<SDValue, 8> Chains;
11280     SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
11281                                         TLI.getPointerTy(DAG.getDataLayout()));
11282     SDValue BasePtr = St->getBasePtr();
11283 
11284     // Perform one or more big stores into memory.
11285     unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits();
11286     for (unsigned I = 0; I < E; I++) {
11287       SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
11288                                    StoreType, ShuffWide,
11289                                    DAG.getIntPtrConstant(I, DL));
11290       SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
11291                                 St->getPointerInfo(), St->getAlignment(),
11292                                 St->getMemOperand()->getFlags());
11293       BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
11294                             Increment);
11295       Chains.push_back(Ch);
11296     }
11297     return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
11298   }
11299 
11300   if (!ISD::isNormalStore(St))
11301     return SDValue();
11302 
11303   // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
11304   // ARM stores of arguments in the same cache line.
11305   if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
11306       StVal.getNode()->hasOneUse()) {
11307     SelectionDAG  &DAG = DCI.DAG;
11308     bool isBigEndian = DAG.getDataLayout().isBigEndian();
11309     SDLoc DL(St);
11310     SDValue BasePtr = St->getBasePtr();
11311     SDValue NewST1 = DAG.getStore(
11312         St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
11313         BasePtr, St->getPointerInfo(), St->getAlignment(),
11314         St->getMemOperand()->getFlags());
11315 
11316     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
11317                                     DAG.getConstant(4, DL, MVT::i32));
11318     return DAG.getStore(NewST1.getValue(0), DL,
11319                         StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
11320                         OffsetPtr, St->getPointerInfo(),
11321                         std::min(4U, St->getAlignment() / 2),
11322                         St->getMemOperand()->getFlags());
11323   }
11324 
11325   if (StVal.getValueType() == MVT::i64 &&
11326       StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
11327 
11328     // Bitcast an i64 store extracted from a vector to f64.
11329     // Otherwise, the i64 value will be legalized to a pair of i32 values.
11330     SelectionDAG &DAG = DCI.DAG;
11331     SDLoc dl(StVal);
11332     SDValue IntVec = StVal.getOperand(0);
11333     EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
11334                                    IntVec.getValueType().getVectorNumElements());
11335     SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
11336     SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
11337                                  Vec, StVal.getOperand(1));
11338     dl = SDLoc(N);
11339     SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
11340     // Make the DAGCombiner fold the bitcasts.
11341     DCI.AddToWorklist(Vec.getNode());
11342     DCI.AddToWorklist(ExtElt.getNode());
11343     DCI.AddToWorklist(V.getNode());
11344     return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
11345                         St->getPointerInfo(), St->getAlignment(),
11346                         St->getMemOperand()->getFlags(), St->getAAInfo());
11347   }
11348 
11349   // If this is a legal vector store, try to combine it into a VST1_UPD.
11350   if (ISD::isNormalStore(N) && VT.isVector() &&
11351       DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
11352     return CombineBaseUpdate(N, DCI);
11353 
11354   return SDValue();
11355 }
11356 
11357 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
11358 /// can replace combinations of VMUL and VCVT (floating-point to integer)
11359 /// when the VMUL has a constant operand that is a power of 2.
11360 ///
11361 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
11362 ///  vmul.f32        d16, d17, d16
11363 ///  vcvt.s32.f32    d16, d16
11364 /// becomes:
11365 ///  vcvt.s32.f32    d16, d16, #3
11366 static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
11367                                   const ARMSubtarget *Subtarget) {
11368   if (!Subtarget->hasNEON())
11369     return SDValue();
11370 
11371   SDValue Op = N->getOperand(0);
11372   if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
11373       Op.getOpcode() != ISD::FMUL)
11374     return SDValue();
11375 
11376   SDValue ConstVec = Op->getOperand(1);
11377   if (!isa<BuildVectorSDNode>(ConstVec))
11378     return SDValue();
11379 
11380   MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
11381   uint32_t FloatBits = FloatTy.getSizeInBits();
11382   MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
11383   uint32_t IntBits = IntTy.getSizeInBits();
11384   unsigned NumLanes = Op.getValueType().getVectorNumElements();
11385   if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
11386     // These instructions only exist converting from f32 to i32. We can handle
11387     // smaller integers by generating an extra truncate, but larger ones would
11388     // be lossy. We also can't handle more then 4 lanes, since these intructions
11389     // only support v2i32/v4i32 types.
11390     return SDValue();
11391   }
11392 
11393   BitVector UndefElements;
11394   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
11395   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
11396   if (C == -1 || C == 0 || C > 32)
11397     return SDValue();
11398 
11399   SDLoc dl(N);
11400   bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
11401   unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
11402     Intrinsic::arm_neon_vcvtfp2fxu;
11403   SDValue FixConv = DAG.getNode(
11404       ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
11405       DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
11406       DAG.getConstant(C, dl, MVT::i32));
11407 
11408   if (IntBits < FloatBits)
11409     FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);
11410 
11411   return FixConv;
11412 }
11413 
11414 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
11415 /// can replace combinations of VCVT (integer to floating-point) and VDIV
11416 /// when the VDIV has a constant operand that is a power of 2.
11417 ///
11418 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
11419 ///  vcvt.f32.s32    d16, d16
11420 ///  vdiv.f32        d16, d17, d16
11421 /// becomes:
11422 ///  vcvt.f32.s32    d16, d16, #3
11423 static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
11424                                   const ARMSubtarget *Subtarget) {
11425   if (!Subtarget->hasNEON())
11426     return SDValue();
11427 
11428   SDValue Op = N->getOperand(0);
11429   unsigned OpOpcode = Op.getNode()->getOpcode();
11430   if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
11431       (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
11432     return SDValue();
11433 
11434   SDValue ConstVec = N->getOperand(1);
11435   if (!isa<BuildVectorSDNode>(ConstVec))
11436     return SDValue();
11437 
11438   MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
11439   uint32_t FloatBits = FloatTy.getSizeInBits();
11440   MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
11441   uint32_t IntBits = IntTy.getSizeInBits();
11442   unsigned NumLanes = Op.getValueType().getVectorNumElements();
11443   if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
11444     // These instructions only exist converting from i32 to f32. We can handle
11445     // smaller integers by generating an extra extend, but larger ones would
11446     // be lossy. We also can't handle more then 4 lanes, since these intructions
11447     // only support v2i32/v4i32 types.
11448     return SDValue();
11449   }
11450 
11451   BitVector UndefElements;
11452   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
11453   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
11454   if (C == -1 || C == 0 || C > 32)
11455     return SDValue();
11456 
11457   SDLoc dl(N);
11458   bool isSigned = OpOpcode == ISD::SINT_TO_FP;
11459   SDValue ConvInput = Op.getOperand(0);
11460   if (IntBits < FloatBits)
11461     ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
11462                             dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
11463                             ConvInput);
11464 
11465   unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
11466     Intrinsic::arm_neon_vcvtfxu2fp;
11467   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
11468                      Op.getValueType(),
11469                      DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
11470                      ConvInput, DAG.getConstant(C, dl, MVT::i32));
11471 }
11472 
11473 /// Getvshiftimm - Check if this is a valid build_vector for the immediate
11474 /// operand of a vector shift operation, where all the elements of the
11475 /// build_vector must have the same constant integer value.
11476 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
11477   // Ignore bit_converts.
11478   while (Op.getOpcode() == ISD::BITCAST)
11479     Op = Op.getOperand(0);
11480   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
11481   APInt SplatBits, SplatUndef;
11482   unsigned SplatBitSize;
11483   bool HasAnyUndefs;
11484   if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
11485                                       HasAnyUndefs, ElementBits) ||
11486       SplatBitSize > ElementBits)
11487     return false;
11488   Cnt = SplatBits.getSExtValue();
11489   return true;
11490 }
11491 
11492 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
11493 /// operand of a vector shift left operation.  That value must be in the range:
11494 ///   0 <= Value < ElementBits for a left shift; or
11495 ///   0 <= Value <= ElementBits for a long left shift.
11496 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
11497   assert(VT.isVector() && "vector shift count is not a vector type");
11498   int64_t ElementBits = VT.getScalarSizeInBits();
11499   if (! getVShiftImm(Op, ElementBits, Cnt))
11500     return false;
11501   return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
11502 }
11503 
11504 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
11505 /// operand of a vector shift right operation.  For a shift opcode, the value
11506 /// is positive, but for an intrinsic the value count must be negative. The
11507 /// absolute value must be in the range:
11508 ///   1 <= |Value| <= ElementBits for a right shift; or
11509 ///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
11510 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
11511                          int64_t &Cnt) {
11512   assert(VT.isVector() && "vector shift count is not a vector type");
11513   int64_t ElementBits = VT.getScalarSizeInBits();
11514   if (! getVShiftImm(Op, ElementBits, Cnt))
11515     return false;
11516   if (!isIntrinsic)
11517     return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
11518   if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) {
11519     Cnt = -Cnt;
11520     return true;
11521   }
11522   return false;
11523 }
11524 
11525 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
11526 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
11527   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
11528   switch (IntNo) {
11529   default:
11530     // Don't do anything for most intrinsics.
11531     break;
11532 
11533   // Vector shifts: check for immediate versions and lower them.
11534   // Note: This is done during DAG combining instead of DAG legalizing because
11535   // the build_vectors for 64-bit vector element shift counts are generally
11536   // not legal, and it is hard to see their values after they get legalized to
11537   // loads from a constant pool.
11538   case Intrinsic::arm_neon_vshifts:
11539   case Intrinsic::arm_neon_vshiftu:
11540   case Intrinsic::arm_neon_vrshifts:
11541   case Intrinsic::arm_neon_vrshiftu:
11542   case Intrinsic::arm_neon_vrshiftn:
11543   case Intrinsic::arm_neon_vqshifts:
11544   case Intrinsic::arm_neon_vqshiftu:
11545   case Intrinsic::arm_neon_vqshiftsu:
11546   case Intrinsic::arm_neon_vqshiftns:
11547   case Intrinsic::arm_neon_vqshiftnu:
11548   case Intrinsic::arm_neon_vqshiftnsu:
11549   case Intrinsic::arm_neon_vqrshiftns:
11550   case Intrinsic::arm_neon_vqrshiftnu:
11551   case Intrinsic::arm_neon_vqrshiftnsu: {
11552     EVT VT = N->getOperand(1).getValueType();
11553     int64_t Cnt;
11554     unsigned VShiftOpc = 0;
11555 
11556     switch (IntNo) {
11557     case Intrinsic::arm_neon_vshifts:
11558     case Intrinsic::arm_neon_vshiftu:
11559       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
11560         VShiftOpc = ARMISD::VSHL;
11561         break;
11562       }
11563       if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
11564         VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
11565                      ARMISD::VSHRs : ARMISD::VSHRu);
11566         break;
11567       }
11568       return SDValue();
11569 
11570     case Intrinsic::arm_neon_vrshifts:
11571     case Intrinsic::arm_neon_vrshiftu:
11572       if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
11573         break;
11574       return SDValue();
11575 
11576     case Intrinsic::arm_neon_vqshifts:
11577     case Intrinsic::arm_neon_vqshiftu:
11578       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
11579         break;
11580       return SDValue();
11581 
11582     case Intrinsic::arm_neon_vqshiftsu:
11583       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
11584         break;
11585       llvm_unreachable("invalid shift count for vqshlu intrinsic");
11586 
11587     case Intrinsic::arm_neon_vrshiftn:
11588     case Intrinsic::arm_neon_vqshiftns:
11589     case Intrinsic::arm_neon_vqshiftnu:
11590     case Intrinsic::arm_neon_vqshiftnsu:
11591     case Intrinsic::arm_neon_vqrshiftns:
11592     case Intrinsic::arm_neon_vqrshiftnu:
11593     case Intrinsic::arm_neon_vqrshiftnsu:
11594       // Narrowing shifts require an immediate right shift.
11595       if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
11596         break;
11597       llvm_unreachable("invalid shift count for narrowing vector shift "
11598                        "intrinsic");
11599 
11600     default:
11601       llvm_unreachable("unhandled vector shift");
11602     }
11603 
11604     switch (IntNo) {
11605     case Intrinsic::arm_neon_vshifts:
11606     case Intrinsic::arm_neon_vshiftu:
11607       // Opcode already set above.
11608       break;
11609     case Intrinsic::arm_neon_vrshifts:
11610       VShiftOpc = ARMISD::VRSHRs; break;
11611     case Intrinsic::arm_neon_vrshiftu:
11612       VShiftOpc = ARMISD::VRSHRu; break;
11613     case Intrinsic::arm_neon_vrshiftn:
11614       VShiftOpc = ARMISD::VRSHRN; break;
11615     case Intrinsic::arm_neon_vqshifts:
11616       VShiftOpc = ARMISD::VQSHLs; break;
11617     case Intrinsic::arm_neon_vqshiftu:
11618       VShiftOpc = ARMISD::VQSHLu; break;
11619     case Intrinsic::arm_neon_vqshiftsu:
11620       VShiftOpc = ARMISD::VQSHLsu; break;
11621     case Intrinsic::arm_neon_vqshiftns:
11622       VShiftOpc = ARMISD::VQSHRNs; break;
11623     case Intrinsic::arm_neon_vqshiftnu:
11624       VShiftOpc = ARMISD::VQSHRNu; break;
11625     case Intrinsic::arm_neon_vqshiftnsu:
11626       VShiftOpc = ARMISD::VQSHRNsu; break;
11627     case Intrinsic::arm_neon_vqrshiftns:
11628       VShiftOpc = ARMISD::VQRSHRNs; break;
11629     case Intrinsic::arm_neon_vqrshiftnu:
11630       VShiftOpc = ARMISD::VQRSHRNu; break;
11631     case Intrinsic::arm_neon_vqrshiftnsu:
11632       VShiftOpc = ARMISD::VQRSHRNsu; break;
11633     }
11634 
11635     SDLoc dl(N);
11636     return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
11637                        N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
11638   }
11639 
11640   case Intrinsic::arm_neon_vshiftins: {
11641     EVT VT = N->getOperand(1).getValueType();
11642     int64_t Cnt;
11643     unsigned VShiftOpc = 0;
11644 
11645     if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
11646       VShiftOpc = ARMISD::VSLI;
11647     else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
11648       VShiftOpc = ARMISD::VSRI;
11649     else {
11650       llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
11651     }
11652 
11653     SDLoc dl(N);
11654     return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
11655                        N->getOperand(1), N->getOperand(2),
11656                        DAG.getConstant(Cnt, dl, MVT::i32));
11657   }
11658 
11659   case Intrinsic::arm_neon_vqrshifts:
11660   case Intrinsic::arm_neon_vqrshiftu:
11661     // No immediate versions of these to check for.
11662     break;
11663   }
11664 
11665   return SDValue();
11666 }
11667 
11668 /// PerformShiftCombine - Checks for immediate versions of vector shifts and
11669 /// lowers them.  As with the vector shift intrinsics, this is done during DAG
11670 /// combining instead of DAG legalizing because the build_vectors for 64-bit
11671 /// vector element shift counts are generally not legal, and it is hard to see
11672 /// their values after they get legalized to loads from a constant pool.
11673 static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
11674                                    const ARMSubtarget *ST) {
11675   EVT VT = N->getValueType(0);
11676   if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
11677     // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
11678     // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
11679     SDValue N1 = N->getOperand(1);
11680     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
11681       SDValue N0 = N->getOperand(0);
11682       if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
11683           DAG.MaskedValueIsZero(N0.getOperand(0),
11684                                 APInt::getHighBitsSet(32, 16)))
11685         return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
11686     }
11687   }
11688 
11689   // Nothing to be done for scalar shifts.
11690   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11691   if (!VT.isVector() || !TLI.isTypeLegal(VT))
11692     return SDValue();
11693 
11694   assert(ST->hasNEON() && "unexpected vector shift");
11695   int64_t Cnt;
11696 
11697   switch (N->getOpcode()) {
11698   default: llvm_unreachable("unexpected shift opcode");
11699 
11700   case ISD::SHL:
11701     if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
11702       SDLoc dl(N);
11703       return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0),
11704                          DAG.getConstant(Cnt, dl, MVT::i32));
11705     }
11706     break;
11707 
11708   case ISD::SRA:
11709   case ISD::SRL:
11710     if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
11711       unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
11712                             ARMISD::VSHRs : ARMISD::VSHRu);
11713       SDLoc dl(N);
11714       return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
11715                          DAG.getConstant(Cnt, dl, MVT::i32));
11716     }
11717   }
11718   return SDValue();
11719 }
11720 
11721 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
11722 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
11723 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
11724                                     const ARMSubtarget *ST) {
11725   SDValue N0 = N->getOperand(0);
11726 
11727   // Check for sign- and zero-extensions of vector extract operations of 8-
11728   // and 16-bit vector elements.  NEON supports these directly.  They are
11729   // handled during DAG combining because type legalization will promote them
11730   // to 32-bit types and it is messy to recognize the operations after that.
11731   if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
11732     SDValue Vec = N0.getOperand(0);
11733     SDValue Lane = N0.getOperand(1);
11734     EVT VT = N->getValueType(0);
11735     EVT EltVT = N0.getValueType();
11736     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11737 
11738     if (VT == MVT::i32 &&
11739         (EltVT == MVT::i8 || EltVT == MVT::i16) &&
11740         TLI.isTypeLegal(Vec.getValueType()) &&
11741         isa<ConstantSDNode>(Lane)) {
11742 
11743       unsigned Opc = 0;
11744       switch (N->getOpcode()) {
11745       default: llvm_unreachable("unexpected opcode");
11746       case ISD::SIGN_EXTEND:
11747         Opc = ARMISD::VGETLANEs;
11748         break;
11749       case ISD::ZERO_EXTEND:
11750       case ISD::ANY_EXTEND:
11751         Opc = ARMISD::VGETLANEu;
11752         break;
11753       }
11754       return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
11755     }
11756   }
11757 
11758   return SDValue();
11759 }
11760 
11761 SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
11762   // If we have a CMOV, OR and AND combination such as:
11763   //   if (x & CN)
11764   //     y |= CM;
11765   //
11766   // And:
11767   //   * CN is a single bit;
11768   //   * All bits covered by CM are known zero in y
11769   //
11770   // Then we can convert this into a sequence of BFI instructions. This will
11771   // always be a win if CM is a single bit, will always be no worse than the
11772   // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
11773   // three bits (due to the extra IT instruction).
11774 
11775   SDValue Op0 = CMOV->getOperand(0);
11776   SDValue Op1 = CMOV->getOperand(1);
11777   auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
11778   auto CC = CCNode->getAPIntValue().getLimitedValue();
11779   SDValue CmpZ = CMOV->getOperand(4);
11780 
11781   // The compare must be against zero.
11782   if (!isNullConstant(CmpZ->getOperand(1)))
11783     return SDValue();
11784 
11785   assert(CmpZ->getOpcode() == ARMISD::CMPZ);
11786   SDValue And = CmpZ->getOperand(0);
11787   if (And->getOpcode() != ISD::AND)
11788     return SDValue();
11789   ConstantSDNode *AndC = dyn_cast<ConstantSDNode>(And->getOperand(1));
11790   if (!AndC || !AndC->getAPIntValue().isPowerOf2())
11791     return SDValue();
11792   SDValue X = And->getOperand(0);
11793 
11794   if (CC == ARMCC::EQ) {
11795     // We're performing an "equal to zero" compare. Swap the operands so we
11796     // canonicalize on a "not equal to zero" compare.
11797     std::swap(Op0, Op1);
11798   } else {
11799     assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
11800   }
11801 
11802   if (Op1->getOpcode() != ISD::OR)
11803     return SDValue();
11804 
11805   ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
11806   if (!OrC)
11807     return SDValue();
11808   SDValue Y = Op1->getOperand(0);
11809 
11810   if (Op0 != Y)
11811     return SDValue();
11812 
11813   // Now, is it profitable to continue?
11814   APInt OrCI = OrC->getAPIntValue();
11815   unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
11816   if (OrCI.countPopulation() > Heuristic)
11817     return SDValue();
11818 
11819   // Lastly, can we determine that the bits defined by OrCI
11820   // are zero in Y?
11821   KnownBits Known;
11822   DAG.computeKnownBits(Y, Known);
11823   if ((OrCI & Known.Zero) != OrCI)
11824     return SDValue();
11825 
11826   // OK, we can do the combine.
11827   SDValue V = Y;
11828   SDLoc dl(X);
11829   EVT VT = X.getValueType();
11830   unsigned BitInX = AndC->getAPIntValue().logBase2();
11831 
11832   if (BitInX != 0) {
11833     // We must shift X first.
11834     X = DAG.getNode(ISD::SRL, dl, VT, X,
11835                     DAG.getConstant(BitInX, dl, VT));
11836   }
11837 
11838   for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
11839        BitInY < NumActiveBits; ++BitInY) {
11840     if (OrCI[BitInY] == 0)
11841       continue;
11842     APInt Mask(VT.getSizeInBits(), 0);
11843     Mask.setBit(BitInY);
11844     V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
11845                     // Confusingly, the operand is an *inverted* mask.
11846                     DAG.getConstant(~Mask, dl, VT));
11847   }
11848 
11849   return V;
11850 }
11851 
11852 /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
11853 SDValue
11854 ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
11855   SDValue Cmp = N->getOperand(4);
11856   if (Cmp.getOpcode() != ARMISD::CMPZ)
11857     // Only looking at NE cases.
11858     return SDValue();
11859 
11860   EVT VT = N->getValueType(0);
11861   SDLoc dl(N);
11862   SDValue LHS = Cmp.getOperand(0);
11863   SDValue RHS = Cmp.getOperand(1);
11864   SDValue Chain = N->getOperand(0);
11865   SDValue BB = N->getOperand(1);
11866   SDValue ARMcc = N->getOperand(2);
11867   ARMCC::CondCodes CC =
11868     (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
11869 
11870   // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
11871   // -> (brcond Chain BB CC CPSR Cmp)
11872   if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
11873       LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
11874       LHS->getOperand(0)->hasOneUse()) {
11875     auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
11876     auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
11877     auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
11878     auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
11879     if ((LHS00C && LHS00C->getZExtValue() == 0) &&
11880         (LHS01C && LHS01C->getZExtValue() == 1) &&
11881         (LHS1C && LHS1C->getZExtValue() == 1) &&
11882         (RHSC && RHSC->getZExtValue() == 0)) {
11883       return DAG.getNode(
11884           ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
11885           LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
11886     }
11887   }
11888 
11889   return SDValue();
11890 }
11891 
11892 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
11893 SDValue
11894 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
11895   SDValue Cmp = N->getOperand(4);
11896   if (Cmp.getOpcode() != ARMISD::CMPZ)
11897     // Only looking at EQ and NE cases.
11898     return SDValue();
11899 
11900   EVT VT = N->getValueType(0);
11901   SDLoc dl(N);
11902   SDValue LHS = Cmp.getOperand(0);
11903   SDValue RHS = Cmp.getOperand(1);
11904   SDValue FalseVal = N->getOperand(0);
11905   SDValue TrueVal = N->getOperand(1);
11906   SDValue ARMcc = N->getOperand(2);
11907   ARMCC::CondCodes CC =
11908     (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
11909 
11910   // BFI is only available on V6T2+.
11911   if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
11912     SDValue R = PerformCMOVToBFICombine(N, DAG);
11913     if (R)
11914       return R;
11915   }
11916 
11917   // Simplify
11918   //   mov     r1, r0
11919   //   cmp     r1, x
11920   //   mov     r0, y
11921   //   moveq   r0, x
11922   // to
11923   //   cmp     r0, x
11924   //   movne   r0, y
11925   //
11926   //   mov     r1, r0
11927   //   cmp     r1, x
11928   //   mov     r0, x
11929   //   movne   r0, y
11930   // to
11931   //   cmp     r0, x
11932   //   movne   r0, y
11933   /// FIXME: Turn this into a target neutral optimization?
11934   SDValue Res;
11935   if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
11936     Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
11937                       N->getOperand(3), Cmp);
11938   } else if (CC == ARMCC::EQ && TrueVal == RHS) {
11939     SDValue ARMcc;
11940     SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
11941     Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
11942                       N->getOperand(3), NewCmp);
11943   }
11944 
11945   // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
11946   // -> (cmov F T CC CPSR Cmp)
11947   if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
11948     auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
11949     auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
11950     auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
11951     if ((LHS0C && LHS0C->getZExtValue() == 0) &&
11952         (LHS1C && LHS1C->getZExtValue() == 1) &&
11953         (RHSC && RHSC->getZExtValue() == 0)) {
11954       return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
11955                          LHS->getOperand(2), LHS->getOperand(3),
11956                          LHS->getOperand(4));
11957     }
11958   }
11959 
11960   if (Res.getNode()) {
11961     KnownBits Known;
11962     DAG.computeKnownBits(SDValue(N,0), Known);
11963     // Capture demanded bits information that would be otherwise lost.
11964     if (Known.Zero == 0xfffffffe)
11965       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11966                         DAG.getValueType(MVT::i1));
11967     else if (Known.Zero == 0xffffff00)
11968       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11969                         DAG.getValueType(MVT::i8));
11970     else if (Known.Zero == 0xffff0000)
11971       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11972                         DAG.getValueType(MVT::i16));
11973   }
11974 
11975   return Res;
11976 }
11977 
11978 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
11979                                              DAGCombinerInfo &DCI) const {
11980   switch (N->getOpcode()) {
11981   default: break;
11982   case ARMISD::ADDE:    return PerformADDECombine(N, DCI, Subtarget);
11983   case ARMISD::UMLAL:   return PerformUMLALCombine(N, DCI.DAG, Subtarget);
11984   case ISD::ADD:        return PerformADDCombine(N, DCI, Subtarget);
11985   case ISD::SUB:        return PerformSUBCombine(N, DCI);
11986   case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
11987   case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
11988   case ISD::XOR:        return PerformXORCombine(N, DCI, Subtarget);
11989   case ISD::AND:        return PerformANDCombine(N, DCI, Subtarget);
11990   case ARMISD::ADDC:
11991   case ARMISD::SUBC:    return PerformAddcSubcCombine(N, DCI.DAG, Subtarget);
11992   case ARMISD::SUBE:    return PerformAddeSubeCombine(N, DCI.DAG, Subtarget);
11993   case ARMISD::BFI:     return PerformBFICombine(N, DCI);
11994   case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
11995   case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
11996   case ISD::STORE:      return PerformSTORECombine(N, DCI);
11997   case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
11998   case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
11999   case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
12000   case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
12001   case ARMISD::VDUP: return PerformVDUPCombine(N, DCI);
12002   case ISD::FP_TO_SINT:
12003   case ISD::FP_TO_UINT:
12004     return PerformVCVTCombine(N, DCI.DAG, Subtarget);
12005   case ISD::FDIV:
12006     return PerformVDIVCombine(N, DCI.DAG, Subtarget);
12007   case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
12008   case ISD::SHL:
12009   case ISD::SRA:
12010   case ISD::SRL:        return PerformShiftCombine(N, DCI.DAG, Subtarget);
12011   case ISD::SIGN_EXTEND:
12012   case ISD::ZERO_EXTEND:
12013   case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
12014   case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
12015   case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
12016   case ISD::LOAD:       return PerformLOADCombine(N, DCI);
12017   case ARMISD::VLD1DUP:
12018   case ARMISD::VLD2DUP:
12019   case ARMISD::VLD3DUP:
12020   case ARMISD::VLD4DUP:
12021     return PerformVLDCombine(N, DCI);
12022   case ARMISD::BUILD_VECTOR:
12023     return PerformARMBUILD_VECTORCombine(N, DCI);
12024   case ARMISD::SMULWB: {
12025     unsigned BitWidth = N->getValueType(0).getSizeInBits();
12026     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
12027     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
12028       return SDValue();
12029     break;
12030   }
12031   case ARMISD::SMULWT: {
12032     unsigned BitWidth = N->getValueType(0).getSizeInBits();
12033     APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
12034     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
12035       return SDValue();
12036     break;
12037   }
12038   case ARMISD::SMLALBB: {
12039     unsigned BitWidth = N->getValueType(0).getSizeInBits();
12040     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
12041     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
12042         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
12043       return SDValue();
12044     break;
12045   }
12046   case ARMISD::SMLALBT: {
12047     unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits();
12048     APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
12049     unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits();
12050     APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
12051     if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) ||
12052         (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI)))
12053       return SDValue();
12054     break;
12055   }
12056   case ARMISD::SMLALTB: {
12057     unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits();
12058     APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
12059     unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits();
12060     APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
12061     if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) ||
12062         (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI)))
12063       return SDValue();
12064     break;
12065   }
12066   case ARMISD::SMLALTT: {
12067     unsigned BitWidth = N->getValueType(0).getSizeInBits();
12068     APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
12069     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
12070         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
12071       return SDValue();
12072     break;
12073   }
12074   case ISD::INTRINSIC_VOID:
12075   case ISD::INTRINSIC_W_CHAIN:
12076     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12077     case Intrinsic::arm_neon_vld1:
12078     case Intrinsic::arm_neon_vld2:
12079     case Intrinsic::arm_neon_vld3:
12080     case Intrinsic::arm_neon_vld4:
12081     case Intrinsic::arm_neon_vld2lane:
12082     case Intrinsic::arm_neon_vld3lane:
12083     case Intrinsic::arm_neon_vld4lane:
12084     case Intrinsic::arm_neon_vst1:
12085     case Intrinsic::arm_neon_vst2:
12086     case Intrinsic::arm_neon_vst3:
12087     case Intrinsic::arm_neon_vst4:
12088     case Intrinsic::arm_neon_vst2lane:
12089     case Intrinsic::arm_neon_vst3lane:
12090     case Intrinsic::arm_neon_vst4lane:
12091       return PerformVLDCombine(N, DCI);
12092     default: break;
12093     }
12094     break;
12095   }
12096   return SDValue();
12097 }
12098 
12099 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
12100                                                           EVT VT) const {
12101   return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
12102 }
12103 
12104 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
12105                                                        unsigned,
12106                                                        unsigned,
12107                                                        bool *Fast) const {
12108   // The AllowsUnaliged flag models the SCTLR.A setting in ARM cpus
12109   bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
12110 
12111   switch (VT.getSimpleVT().SimpleTy) {
12112   default:
12113     return false;
12114   case MVT::i8:
12115   case MVT::i16:
12116   case MVT::i32: {
12117     // Unaligned access can use (for example) LRDB, LRDH, LDR
12118     if (AllowsUnaligned) {
12119       if (Fast)
12120         *Fast = Subtarget->hasV7Ops();
12121       return true;
12122     }
12123     return false;
12124   }
12125   case MVT::f64:
12126   case MVT::v2f64: {
12127     // For any little-endian targets with neon, we can support unaligned ld/st
12128     // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
12129     // A big-endian target may also explicitly support unaligned accesses
12130     if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
12131       if (Fast)
12132         *Fast = true;
12133       return true;
12134     }
12135     return false;
12136   }
12137   }
12138 }
12139 
12140 static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
12141                        unsigned AlignCheck) {
12142   return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
12143           (DstAlign == 0 || DstAlign % AlignCheck == 0));
12144 }
12145 
12146 EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
12147                                            unsigned DstAlign, unsigned SrcAlign,
12148                                            bool IsMemset, bool ZeroMemset,
12149                                            bool MemcpyStrSrc,
12150                                            MachineFunction &MF) const {
12151   const Function *F = MF.getFunction();
12152 
12153   // See if we can use NEON instructions for this...
12154   if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
12155       !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
12156     bool Fast;
12157     if (Size >= 16 &&
12158         (memOpAlign(SrcAlign, DstAlign, 16) ||
12159          (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, &Fast) && Fast))) {
12160       return MVT::v2f64;
12161     } else if (Size >= 8 &&
12162                (memOpAlign(SrcAlign, DstAlign, 8) ||
12163                 (allowsMisalignedMemoryAccesses(MVT::f64, 0, 1, &Fast) &&
12164                  Fast))) {
12165       return MVT::f64;
12166     }
12167   }
12168 
12169   // Let the target-independent logic figure it out.
12170   return MVT::Other;
12171 }
12172 
12173 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
12174   if (Val.getOpcode() != ISD::LOAD)
12175     return false;
12176 
12177   EVT VT1 = Val.getValueType();
12178   if (!VT1.isSimple() || !VT1.isInteger() ||
12179       !VT2.isSimple() || !VT2.isInteger())
12180     return false;
12181 
12182   switch (VT1.getSimpleVT().SimpleTy) {
12183   default: break;
12184   case MVT::i1:
12185   case MVT::i8:
12186   case MVT::i16:
12187     // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
12188     return true;
12189   }
12190 
12191   return false;
12192 }
12193 
12194 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
12195   EVT VT = ExtVal.getValueType();
12196 
12197   if (!isTypeLegal(VT))
12198     return false;
12199 
12200   // Don't create a loadext if we can fold the extension into a wide/long
12201   // instruction.
12202   // If there's more than one user instruction, the loadext is desirable no
12203   // matter what.  There can be two uses by the same instruction.
12204   if (ExtVal->use_empty() ||
12205       !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
12206     return true;
12207 
12208   SDNode *U = *ExtVal->use_begin();
12209   if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
12210        U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
12211     return false;
12212 
12213   return true;
12214 }
12215 
12216 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
12217   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
12218     return false;
12219 
12220   if (!isTypeLegal(EVT::getEVT(Ty1)))
12221     return false;
12222 
12223   assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
12224 
12225   // Assuming the caller doesn't have a zeroext or signext return parameter,
12226   // truncation all the way down to i1 is valid.
12227   return true;
12228 }
12229 
12230 int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
12231                                                 const AddrMode &AM, Type *Ty,
12232                                                 unsigned AS) const {
12233   if (isLegalAddressingMode(DL, AM, Ty, AS)) {
12234     if (Subtarget->hasFPAO())
12235       return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
12236     return 0;
12237   }
12238   return -1;
12239 }
12240 
12241 
12242 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
12243   if (V < 0)
12244     return false;
12245 
12246   unsigned Scale = 1;
12247   switch (VT.getSimpleVT().SimpleTy) {
12248   default: return false;
12249   case MVT::i1:
12250   case MVT::i8:
12251     // Scale == 1;
12252     break;
12253   case MVT::i16:
12254     // Scale == 2;
12255     Scale = 2;
12256     break;
12257   case MVT::i32:
12258     // Scale == 4;
12259     Scale = 4;
12260     break;
12261   }
12262 
12263   if ((V & (Scale - 1)) != 0)
12264     return false;
12265   V /= Scale;
12266   return V == (V & ((1LL << 5) - 1));
12267 }
12268 
12269 static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
12270                                       const ARMSubtarget *Subtarget) {
12271   bool isNeg = false;
12272   if (V < 0) {
12273     isNeg = true;
12274     V = - V;
12275   }
12276 
12277   switch (VT.getSimpleVT().SimpleTy) {
12278   default: return false;
12279   case MVT::i1:
12280   case MVT::i8:
12281   case MVT::i16:
12282   case MVT::i32:
12283     // + imm12 or - imm8
12284     if (isNeg)
12285       return V == (V & ((1LL << 8) - 1));
12286     return V == (V & ((1LL << 12) - 1));
12287   case MVT::f32:
12288   case MVT::f64:
12289     // Same as ARM mode. FIXME: NEON?
12290     if (!Subtarget->hasVFP2())
12291       return false;
12292     if ((V & 3) != 0)
12293       return false;
12294     V >>= 2;
12295     return V == (V & ((1LL << 8) - 1));
12296   }
12297 }
12298 
12299 /// isLegalAddressImmediate - Return true if the integer value can be used
12300 /// as the offset of the target addressing mode for load / store of the
12301 /// given type.
12302 static bool isLegalAddressImmediate(int64_t V, EVT VT,
12303                                     const ARMSubtarget *Subtarget) {
12304   if (V == 0)
12305     return true;
12306 
12307   if (!VT.isSimple())
12308     return false;
12309 
12310   if (Subtarget->isThumb1Only())
12311     return isLegalT1AddressImmediate(V, VT);
12312   else if (Subtarget->isThumb2())
12313     return isLegalT2AddressImmediate(V, VT, Subtarget);
12314 
12315   // ARM mode.
12316   if (V < 0)
12317     V = - V;
12318   switch (VT.getSimpleVT().SimpleTy) {
12319   default: return false;
12320   case MVT::i1:
12321   case MVT::i8:
12322   case MVT::i32:
12323     // +- imm12
12324     return V == (V & ((1LL << 12) - 1));
12325   case MVT::i16:
12326     // +- imm8
12327     return V == (V & ((1LL << 8) - 1));
12328   case MVT::f32:
12329   case MVT::f64:
12330     if (!Subtarget->hasVFP2()) // FIXME: NEON?
12331       return false;
12332     if ((V & 3) != 0)
12333       return false;
12334     V >>= 2;
12335     return V == (V & ((1LL << 8) - 1));
12336   }
12337 }
12338 
12339 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
12340                                                       EVT VT) const {
12341   int Scale = AM.Scale;
12342   if (Scale < 0)
12343     return false;
12344 
12345   switch (VT.getSimpleVT().SimpleTy) {
12346   default: return false;
12347   case MVT::i1:
12348   case MVT::i8:
12349   case MVT::i16:
12350   case MVT::i32:
12351     if (Scale == 1)
12352       return true;
12353     // r + r << imm
12354     Scale = Scale & ~1;
12355     return Scale == 2 || Scale == 4 || Scale == 8;
12356   case MVT::i64:
12357     // r + r
12358     if (((unsigned)AM.HasBaseReg + Scale) <= 2)
12359       return true;
12360     return false;
12361   case MVT::isVoid:
12362     // Note, we allow "void" uses (basically, uses that aren't loads or
12363     // stores), because arm allows folding a scale into many arithmetic
12364     // operations.  This should be made more precise and revisited later.
12365 
12366     // Allow r << imm, but the imm has to be a multiple of two.
12367     if (Scale & 1) return false;
12368     return isPowerOf2_32(Scale);
12369   }
12370 }
12371 
12372 /// isLegalAddressingMode - Return true if the addressing mode represented
12373 /// by AM is legal for this target, for a load/store of the specified type.
12374 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
12375                                               const AddrMode &AM, Type *Ty,
12376                                               unsigned AS) const {
12377   EVT VT = getValueType(DL, Ty, true);
12378   if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
12379     return false;
12380 
12381   // Can never fold addr of global into load/store.
12382   if (AM.BaseGV)
12383     return false;
12384 
12385   switch (AM.Scale) {
12386   case 0:  // no scale reg, must be "r+i" or "r", or "i".
12387     break;
12388   case 1:
12389     if (Subtarget->isThumb1Only())
12390       return false;
12391     LLVM_FALLTHROUGH;
12392   default:
12393     // ARM doesn't support any R+R*scale+imm addr modes.
12394     if (AM.BaseOffs)
12395       return false;
12396 
12397     if (!VT.isSimple())
12398       return false;
12399 
12400     if (Subtarget->isThumb2())
12401       return isLegalT2ScaledAddressingMode(AM, VT);
12402 
12403     int Scale = AM.Scale;
12404     switch (VT.getSimpleVT().SimpleTy) {
12405     default: return false;
12406     case MVT::i1:
12407     case MVT::i8:
12408     case MVT::i32:
12409       if (Scale < 0) Scale = -Scale;
12410       if (Scale == 1)
12411         return true;
12412       // r + r << imm
12413       return isPowerOf2_32(Scale & ~1);
12414     case MVT::i16:
12415     case MVT::i64:
12416       // r + r
12417       if (((unsigned)AM.HasBaseReg + Scale) <= 2)
12418         return true;
12419       return false;
12420 
12421     case MVT::isVoid:
12422       // Note, we allow "void" uses (basically, uses that aren't loads or
12423       // stores), because arm allows folding a scale into many arithmetic
12424       // operations.  This should be made more precise and revisited later.
12425 
12426       // Allow r << imm, but the imm has to be a multiple of two.
12427       if (Scale & 1) return false;
12428       return isPowerOf2_32(Scale);
12429     }
12430   }
12431   return true;
12432 }
12433 
12434 /// isLegalICmpImmediate - Return true if the specified immediate is legal
12435 /// icmp immediate, that is the target has icmp instructions which can compare
12436 /// a register against the immediate without having to materialize the
12437 /// immediate into a register.
12438 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
12439   // Thumb2 and ARM modes can use cmn for negative immediates.
12440   if (!Subtarget->isThumb())
12441     return ARM_AM::getSOImmVal(std::abs(Imm)) != -1;
12442   if (Subtarget->isThumb2())
12443     return ARM_AM::getT2SOImmVal(std::abs(Imm)) != -1;
12444   // Thumb1 doesn't have cmn, and only 8-bit immediates.
12445   return Imm >= 0 && Imm <= 255;
12446 }
12447 
12448 /// isLegalAddImmediate - Return true if the specified immediate is a legal add
12449 /// *or sub* immediate, that is the target has add or sub instructions which can
12450 /// add a register with the immediate without having to materialize the
12451 /// immediate into a register.
12452 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
12453   // Same encoding for add/sub, just flip the sign.
12454   int64_t AbsImm = std::abs(Imm);
12455   if (!Subtarget->isThumb())
12456     return ARM_AM::getSOImmVal(AbsImm) != -1;
12457   if (Subtarget->isThumb2())
12458     return ARM_AM::getT2SOImmVal(AbsImm) != -1;
12459   // Thumb1 only has 8-bit unsigned immediate.
12460   return AbsImm >= 0 && AbsImm <= 255;
12461 }
12462 
12463 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
12464                                       bool isSEXTLoad, SDValue &Base,
12465                                       SDValue &Offset, bool &isInc,
12466                                       SelectionDAG &DAG) {
12467   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
12468     return false;
12469 
12470   if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
12471     // AddressingMode 3
12472     Base = Ptr->getOperand(0);
12473     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12474       int RHSC = (int)RHS->getZExtValue();
12475       if (RHSC < 0 && RHSC > -256) {
12476         assert(Ptr->getOpcode() == ISD::ADD);
12477         isInc = false;
12478         Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
12479         return true;
12480       }
12481     }
12482     isInc = (Ptr->getOpcode() == ISD::ADD);
12483     Offset = Ptr->getOperand(1);
12484     return true;
12485   } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
12486     // AddressingMode 2
12487     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12488       int RHSC = (int)RHS->getZExtValue();
12489       if (RHSC < 0 && RHSC > -0x1000) {
12490         assert(Ptr->getOpcode() == ISD::ADD);
12491         isInc = false;
12492         Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
12493         Base = Ptr->getOperand(0);
12494         return true;
12495       }
12496     }
12497 
12498     if (Ptr->getOpcode() == ISD::ADD) {
12499       isInc = true;
12500       ARM_AM::ShiftOpc ShOpcVal=
12501         ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
12502       if (ShOpcVal != ARM_AM::no_shift) {
12503         Base = Ptr->getOperand(1);
12504         Offset = Ptr->getOperand(0);
12505       } else {
12506         Base = Ptr->getOperand(0);
12507         Offset = Ptr->getOperand(1);
12508       }
12509       return true;
12510     }
12511 
12512     isInc = (Ptr->getOpcode() == ISD::ADD);
12513     Base = Ptr->getOperand(0);
12514     Offset = Ptr->getOperand(1);
12515     return true;
12516   }
12517 
12518   // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
12519   return false;
12520 }
12521 
12522 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
12523                                      bool isSEXTLoad, SDValue &Base,
12524                                      SDValue &Offset, bool &isInc,
12525                                      SelectionDAG &DAG) {
12526   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
12527     return false;
12528 
12529   Base = Ptr->getOperand(0);
12530   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12531     int RHSC = (int)RHS->getZExtValue();
12532     if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
12533       assert(Ptr->getOpcode() == ISD::ADD);
12534       isInc = false;
12535       Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
12536       return true;
12537     } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
12538       isInc = Ptr->getOpcode() == ISD::ADD;
12539       Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
12540       return true;
12541     }
12542   }
12543 
12544   return false;
12545 }
12546 
12547 /// getPreIndexedAddressParts - returns true by value, base pointer and
12548 /// offset pointer and addressing mode by reference if the node's address
12549 /// can be legally represented as pre-indexed load / store address.
12550 bool
12551 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
12552                                              SDValue &Offset,
12553                                              ISD::MemIndexedMode &AM,
12554                                              SelectionDAG &DAG) const {
12555   if (Subtarget->isThumb1Only())
12556     return false;
12557 
12558   EVT VT;
12559   SDValue Ptr;
12560   bool isSEXTLoad = false;
12561   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
12562     Ptr = LD->getBasePtr();
12563     VT  = LD->getMemoryVT();
12564     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
12565   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
12566     Ptr = ST->getBasePtr();
12567     VT  = ST->getMemoryVT();
12568   } else
12569     return false;
12570 
12571   bool isInc;
12572   bool isLegal = false;
12573   if (Subtarget->isThumb2())
12574     isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
12575                                        Offset, isInc, DAG);
12576   else
12577     isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
12578                                         Offset, isInc, DAG);
12579   if (!isLegal)
12580     return false;
12581 
12582   AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
12583   return true;
12584 }
12585 
12586 /// getPostIndexedAddressParts - returns true by value, base pointer and
12587 /// offset pointer and addressing mode by reference if this node can be
12588 /// combined with a load / store to form a post-indexed load / store.
12589 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
12590                                                    SDValue &Base,
12591                                                    SDValue &Offset,
12592                                                    ISD::MemIndexedMode &AM,
12593                                                    SelectionDAG &DAG) const {
12594   EVT VT;
12595   SDValue Ptr;
12596   bool isSEXTLoad = false, isNonExt;
12597   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
12598     VT  = LD->getMemoryVT();
12599     Ptr = LD->getBasePtr();
12600     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
12601     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
12602   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
12603     VT  = ST->getMemoryVT();
12604     Ptr = ST->getBasePtr();
12605     isNonExt = !ST->isTruncatingStore();
12606   } else
12607     return false;
12608 
12609   if (Subtarget->isThumb1Only()) {
12610     // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
12611     // must be non-extending/truncating, i32, with an offset of 4.
12612     assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
12613     if (Op->getOpcode() != ISD::ADD || !isNonExt)
12614       return false;
12615     auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
12616     if (!RHS || RHS->getZExtValue() != 4)
12617       return false;
12618 
12619     Offset = Op->getOperand(1);
12620     Base = Op->getOperand(0);
12621     AM = ISD::POST_INC;
12622     return true;
12623   }
12624 
12625   bool isInc;
12626   bool isLegal = false;
12627   if (Subtarget->isThumb2())
12628     isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
12629                                        isInc, DAG);
12630   else
12631     isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
12632                                         isInc, DAG);
12633   if (!isLegal)
12634     return false;
12635 
12636   if (Ptr != Base) {
12637     // Swap base ptr and offset to catch more post-index load / store when
12638     // it's legal. In Thumb2 mode, offset must be an immediate.
12639     if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
12640         !Subtarget->isThumb2())
12641       std::swap(Base, Offset);
12642 
12643     // Post-indexed load / store update the base pointer.
12644     if (Ptr != Base)
12645       return false;
12646   }
12647 
12648   AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
12649   return true;
12650 }
12651 
12652 void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
12653                                                       KnownBits &Known,
12654                                                       const APInt &DemandedElts,
12655                                                       const SelectionDAG &DAG,
12656                                                       unsigned Depth) const {
12657   unsigned BitWidth = Known.getBitWidth();
12658   Known.resetAll();
12659   switch (Op.getOpcode()) {
12660   default: break;
12661   case ARMISD::ADDC:
12662   case ARMISD::ADDE:
12663   case ARMISD::SUBC:
12664   case ARMISD::SUBE:
12665     // These nodes' second result is a boolean
12666     if (Op.getResNo() == 0)
12667       break;
12668     Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
12669     break;
12670   case ARMISD::CMOV: {
12671     // Bits are known zero/one if known on the LHS and RHS.
12672     DAG.computeKnownBits(Op.getOperand(0), Known, Depth+1);
12673     if (Known.isUnknown())
12674       return;
12675 
12676     KnownBits KnownRHS;
12677     DAG.computeKnownBits(Op.getOperand(1), KnownRHS, Depth+1);
12678     Known.Zero &= KnownRHS.Zero;
12679     Known.One  &= KnownRHS.One;
12680     return;
12681   }
12682   case ISD::INTRINSIC_W_CHAIN: {
12683     ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
12684     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
12685     switch (IntID) {
12686     default: return;
12687     case Intrinsic::arm_ldaex:
12688     case Intrinsic::arm_ldrex: {
12689       EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
12690       unsigned MemBits = VT.getScalarSizeInBits();
12691       Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
12692       return;
12693     }
12694     }
12695   }
12696   case ARMISD::BFI: {
12697     // Conservatively, we can recurse down the first operand
12698     // and just mask out all affected bits.
12699     DAG.computeKnownBits(Op.getOperand(0), Known, Depth + 1);
12700 
12701     // The operand to BFI is already a mask suitable for removing the bits it
12702     // sets.
12703     ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
12704     const APInt &Mask = CI->getAPIntValue();
12705     Known.Zero &= Mask;
12706     Known.One &= Mask;
12707     return;
12708   }
12709   }
12710 }
12711 
12712 //===----------------------------------------------------------------------===//
12713 //                           ARM Inline Assembly Support
12714 //===----------------------------------------------------------------------===//
12715 
12716 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
12717   // Looking for "rev" which is V6+.
12718   if (!Subtarget->hasV6Ops())
12719     return false;
12720 
12721   InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
12722   std::string AsmStr = IA->getAsmString();
12723   SmallVector<StringRef, 4> AsmPieces;
12724   SplitString(AsmStr, AsmPieces, ";\n");
12725 
12726   switch (AsmPieces.size()) {
12727   default: return false;
12728   case 1:
12729     AsmStr = AsmPieces[0];
12730     AsmPieces.clear();
12731     SplitString(AsmStr, AsmPieces, " \t,");
12732 
12733     // rev $0, $1
12734     if (AsmPieces.size() == 3 &&
12735         AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
12736         IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
12737       IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
12738       if (Ty && Ty->getBitWidth() == 32)
12739         return IntrinsicLowering::LowerToByteSwap(CI);
12740     }
12741     break;
12742   }
12743 
12744   return false;
12745 }
12746 
12747 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
12748   // At this point, we have to lower this constraint to something else, so we
12749   // lower it to an "r" or "w". However, by doing this we will force the result
12750   // to be in register, while the X constraint is much more permissive.
12751   //
12752   // Although we are correct (we are free to emit anything, without
12753   // constraints), we might break use cases that would expect us to be more
12754   // efficient and emit something else.
12755   if (!Subtarget->hasVFP2())
12756     return "r";
12757   if (ConstraintVT.isFloatingPoint())
12758     return "w";
12759   if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
12760      (ConstraintVT.getSizeInBits() == 64 ||
12761       ConstraintVT.getSizeInBits() == 128))
12762     return "w";
12763 
12764   return "r";
12765 }
12766 
12767 /// getConstraintType - Given a constraint letter, return the type of
12768 /// constraint it is for this target.
12769 ARMTargetLowering::ConstraintType
12770 ARMTargetLowering::getConstraintType(StringRef Constraint) const {
12771   if (Constraint.size() == 1) {
12772     switch (Constraint[0]) {
12773     default:  break;
12774     case 'l': return C_RegisterClass;
12775     case 'w': return C_RegisterClass;
12776     case 'h': return C_RegisterClass;
12777     case 'x': return C_RegisterClass;
12778     case 't': return C_RegisterClass;
12779     case 'j': return C_Other; // Constant for movw.
12780       // An address with a single base register. Due to the way we
12781       // currently handle addresses it is the same as an 'r' memory constraint.
12782     case 'Q': return C_Memory;
12783     }
12784   } else if (Constraint.size() == 2) {
12785     switch (Constraint[0]) {
12786     default: break;
12787     // All 'U+' constraints are addresses.
12788     case 'U': return C_Memory;
12789     }
12790   }
12791   return TargetLowering::getConstraintType(Constraint);
12792 }
12793 
12794 /// Examine constraint type and operand type and determine a weight value.
12795 /// This object must already have been set up with the operand type
12796 /// and the current alternative constraint selected.
12797 TargetLowering::ConstraintWeight
12798 ARMTargetLowering::getSingleConstraintMatchWeight(
12799     AsmOperandInfo &info, const char *constraint) const {
12800   ConstraintWeight weight = CW_Invalid;
12801   Value *CallOperandVal = info.CallOperandVal;
12802     // If we don't have a value, we can't do a match,
12803     // but allow it at the lowest weight.
12804   if (!CallOperandVal)
12805     return CW_Default;
12806   Type *type = CallOperandVal->getType();
12807   // Look at the constraint type.
12808   switch (*constraint) {
12809   default:
12810     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
12811     break;
12812   case 'l':
12813     if (type->isIntegerTy()) {
12814       if (Subtarget->isThumb())
12815         weight = CW_SpecificReg;
12816       else
12817         weight = CW_Register;
12818     }
12819     break;
12820   case 'w':
12821     if (type->isFloatingPointTy())
12822       weight = CW_Register;
12823     break;
12824   }
12825   return weight;
12826 }
12827 
12828 typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
12829 RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
12830     const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
12831   if (Constraint.size() == 1) {
12832     // GCC ARM Constraint Letters
12833     switch (Constraint[0]) {
12834     case 'l': // Low regs or general regs.
12835       if (Subtarget->isThumb())
12836         return RCPair(0U, &ARM::tGPRRegClass);
12837       return RCPair(0U, &ARM::GPRRegClass);
12838     case 'h': // High regs or no regs.
12839       if (Subtarget->isThumb())
12840         return RCPair(0U, &ARM::hGPRRegClass);
12841       break;
12842     case 'r':
12843       if (Subtarget->isThumb1Only())
12844         return RCPair(0U, &ARM::tGPRRegClass);
12845       return RCPair(0U, &ARM::GPRRegClass);
12846     case 'w':
12847       if (VT == MVT::Other)
12848         break;
12849       if (VT == MVT::f32)
12850         return RCPair(0U, &ARM::SPRRegClass);
12851       if (VT.getSizeInBits() == 64)
12852         return RCPair(0U, &ARM::DPRRegClass);
12853       if (VT.getSizeInBits() == 128)
12854         return RCPair(0U, &ARM::QPRRegClass);
12855       break;
12856     case 'x':
12857       if (VT == MVT::Other)
12858         break;
12859       if (VT == MVT::f32)
12860         return RCPair(0U, &ARM::SPR_8RegClass);
12861       if (VT.getSizeInBits() == 64)
12862         return RCPair(0U, &ARM::DPR_8RegClass);
12863       if (VT.getSizeInBits() == 128)
12864         return RCPair(0U, &ARM::QPR_8RegClass);
12865       break;
12866     case 't':
12867       if (VT == MVT::f32)
12868         return RCPair(0U, &ARM::SPRRegClass);
12869       break;
12870     }
12871   }
12872   if (StringRef("{cc}").equals_lower(Constraint))
12873     return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
12874 
12875   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
12876 }
12877 
12878 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
12879 /// vector.  If it is invalid, don't add anything to Ops.
12880 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
12881                                                      std::string &Constraint,
12882                                                      std::vector<SDValue>&Ops,
12883                                                      SelectionDAG &DAG) const {
12884   SDValue Result;
12885 
12886   // Currently only support length 1 constraints.
12887   if (Constraint.length() != 1) return;
12888 
12889   char ConstraintLetter = Constraint[0];
12890   switch (ConstraintLetter) {
12891   default: break;
12892   case 'j':
12893   case 'I': case 'J': case 'K': case 'L':
12894   case 'M': case 'N': case 'O':
12895     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
12896     if (!C)
12897       return;
12898 
12899     int64_t CVal64 = C->getSExtValue();
12900     int CVal = (int) CVal64;
12901     // None of these constraints allow values larger than 32 bits.  Check
12902     // that the value fits in an int.
12903     if (CVal != CVal64)
12904       return;
12905 
12906     switch (ConstraintLetter) {
12907       case 'j':
12908         // Constant suitable for movw, must be between 0 and
12909         // 65535.
12910         if (Subtarget->hasV6T2Ops())
12911           if (CVal >= 0 && CVal <= 65535)
12912             break;
12913         return;
12914       case 'I':
12915         if (Subtarget->isThumb1Only()) {
12916           // This must be a constant between 0 and 255, for ADD
12917           // immediates.
12918           if (CVal >= 0 && CVal <= 255)
12919             break;
12920         } else if (Subtarget->isThumb2()) {
12921           // A constant that can be used as an immediate value in a
12922           // data-processing instruction.
12923           if (ARM_AM::getT2SOImmVal(CVal) != -1)
12924             break;
12925         } else {
12926           // A constant that can be used as an immediate value in a
12927           // data-processing instruction.
12928           if (ARM_AM::getSOImmVal(CVal) != -1)
12929             break;
12930         }
12931         return;
12932 
12933       case 'J':
12934         if (Subtarget->isThumb1Only()) {
12935           // This must be a constant between -255 and -1, for negated ADD
12936           // immediates. This can be used in GCC with an "n" modifier that
12937           // prints the negated value, for use with SUB instructions. It is
12938           // not useful otherwise but is implemented for compatibility.
12939           if (CVal >= -255 && CVal <= -1)
12940             break;
12941         } else {
12942           // This must be a constant between -4095 and 4095. It is not clear
12943           // what this constraint is intended for. Implemented for
12944           // compatibility with GCC.
12945           if (CVal >= -4095 && CVal <= 4095)
12946             break;
12947         }
12948         return;
12949 
12950       case 'K':
12951         if (Subtarget->isThumb1Only()) {
12952           // A 32-bit value where only one byte has a nonzero value. Exclude
12953           // zero to match GCC. This constraint is used by GCC internally for
12954           // constants that can be loaded with a move/shift combination.
12955           // It is not useful otherwise but is implemented for compatibility.
12956           if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
12957             break;
12958         } else if (Subtarget->isThumb2()) {
12959           // A constant whose bitwise inverse can be used as an immediate
12960           // value in a data-processing instruction. This can be used in GCC
12961           // with a "B" modifier that prints the inverted value, for use with
12962           // BIC and MVN instructions. It is not useful otherwise but is
12963           // implemented for compatibility.
12964           if (ARM_AM::getT2SOImmVal(~CVal) != -1)
12965             break;
12966         } else {
12967           // A constant whose bitwise inverse can be used as an immediate
12968           // value in a data-processing instruction. This can be used in GCC
12969           // with a "B" modifier that prints the inverted value, for use with
12970           // BIC and MVN instructions. It is not useful otherwise but is
12971           // implemented for compatibility.
12972           if (ARM_AM::getSOImmVal(~CVal) != -1)
12973             break;
12974         }
12975         return;
12976 
12977       case 'L':
12978         if (Subtarget->isThumb1Only()) {
12979           // This must be a constant between -7 and 7,
12980           // for 3-operand ADD/SUB immediate instructions.
12981           if (CVal >= -7 && CVal < 7)
12982             break;
12983         } else if (Subtarget->isThumb2()) {
12984           // A constant whose negation can be used as an immediate value in a
12985           // data-processing instruction. This can be used in GCC with an "n"
12986           // modifier that prints the negated value, for use with SUB
12987           // instructions. It is not useful otherwise but is implemented for
12988           // compatibility.
12989           if (ARM_AM::getT2SOImmVal(-CVal) != -1)
12990             break;
12991         } else {
12992           // A constant whose negation can be used as an immediate value in a
12993           // data-processing instruction. This can be used in GCC with an "n"
12994           // modifier that prints the negated value, for use with SUB
12995           // instructions. It is not useful otherwise but is implemented for
12996           // compatibility.
12997           if (ARM_AM::getSOImmVal(-CVal) != -1)
12998             break;
12999         }
13000         return;
13001 
13002       case 'M':
13003         if (Subtarget->isThumb1Only()) {
13004           // This must be a multiple of 4 between 0 and 1020, for
13005           // ADD sp + immediate.
13006           if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
13007             break;
13008         } else {
13009           // A power of two or a constant between 0 and 32.  This is used in
13010           // GCC for the shift amount on shifted register operands, but it is
13011           // useful in general for any shift amounts.
13012           if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
13013             break;
13014         }
13015         return;
13016 
13017       case 'N':
13018         if (Subtarget->isThumb()) {  // FIXME thumb2
13019           // This must be a constant between 0 and 31, for shift amounts.
13020           if (CVal >= 0 && CVal <= 31)
13021             break;
13022         }
13023         return;
13024 
13025       case 'O':
13026         if (Subtarget->isThumb()) {  // FIXME thumb2
13027           // This must be a multiple of 4 between -508 and 508, for
13028           // ADD/SUB sp = sp + immediate.
13029           if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
13030             break;
13031         }
13032         return;
13033     }
13034     Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
13035     break;
13036   }
13037 
13038   if (Result.getNode()) {
13039     Ops.push_back(Result);
13040     return;
13041   }
13042   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
13043 }
13044 
13045 static RTLIB::Libcall getDivRemLibcall(
13046     const SDNode *N, MVT::SimpleValueType SVT) {
13047   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
13048           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
13049          "Unhandled Opcode in getDivRemLibcall");
13050   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
13051                   N->getOpcode() == ISD::SREM;
13052   RTLIB::Libcall LC;
13053   switch (SVT) {
13054   default: llvm_unreachable("Unexpected request for libcall!");
13055   case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
13056   case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
13057   case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
13058   case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
13059   }
13060   return LC;
13061 }
13062 
13063 static TargetLowering::ArgListTy getDivRemArgList(
13064     const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
13065   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
13066           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
13067          "Unhandled Opcode in getDivRemArgList");
13068   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
13069                   N->getOpcode() == ISD::SREM;
13070   TargetLowering::ArgListTy Args;
13071   TargetLowering::ArgListEntry Entry;
13072   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
13073     EVT ArgVT = N->getOperand(i).getValueType();
13074     Type *ArgTy = ArgVT.getTypeForEVT(*Context);
13075     Entry.Node = N->getOperand(i);
13076     Entry.Ty = ArgTy;
13077     Entry.IsSExt = isSigned;
13078     Entry.IsZExt = !isSigned;
13079     Args.push_back(Entry);
13080   }
13081   if (Subtarget->isTargetWindows() && Args.size() >= 2)
13082     std::swap(Args[0], Args[1]);
13083   return Args;
13084 }
13085 
13086 SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
13087   assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
13088           Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
13089           Subtarget->isTargetWindows()) &&
13090          "Register-based DivRem lowering only");
13091   unsigned Opcode = Op->getOpcode();
13092   assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
13093          "Invalid opcode for Div/Rem lowering");
13094   bool isSigned = (Opcode == ISD::SDIVREM);
13095   EVT VT = Op->getValueType(0);
13096   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
13097   SDLoc dl(Op);
13098 
13099   // If the target has hardware divide, use divide + multiply + subtract:
13100   //     div = a / b
13101   //     rem = a - b * div
13102   //     return {div, rem}
13103   // This should be lowered into UDIV/SDIV + MLS later on.
13104   bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
13105                                         : Subtarget->hasDivideInARMMode();
13106   if (hasDivide && Op->getValueType(0).isSimple() &&
13107       Op->getSimpleValueType(0) == MVT::i32) {
13108     unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
13109     const SDValue Dividend = Op->getOperand(0);
13110     const SDValue Divisor = Op->getOperand(1);
13111     SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
13112     SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
13113     SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
13114 
13115     SDValue Values[2] = {Div, Rem};
13116     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
13117   }
13118 
13119   RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
13120                                        VT.getSimpleVT().SimpleTy);
13121   SDValue InChain = DAG.getEntryNode();
13122 
13123   TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
13124                                                     DAG.getContext(),
13125                                                     Subtarget);
13126 
13127   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
13128                                          getPointerTy(DAG.getDataLayout()));
13129 
13130   Type *RetTy = StructType::get(Ty, Ty);
13131 
13132   if (Subtarget->isTargetWindows())
13133     InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);
13134 
13135   TargetLowering::CallLoweringInfo CLI(DAG);
13136   CLI.setDebugLoc(dl).setChain(InChain)
13137     .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
13138     .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
13139 
13140   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
13141   return CallInfo.first;
13142 }
13143 
13144 // Lowers REM using divmod helpers
13145 // see RTABI section 4.2/4.3
13146 SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
13147   // Build return types (div and rem)
13148   std::vector<Type*> RetTyParams;
13149   Type *RetTyElement;
13150 
13151   switch (N->getValueType(0).getSimpleVT().SimpleTy) {
13152   default: llvm_unreachable("Unexpected request for libcall!");
13153   case MVT::i8:   RetTyElement = Type::getInt8Ty(*DAG.getContext());  break;
13154   case MVT::i16:  RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
13155   case MVT::i32:  RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
13156   case MVT::i64:  RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
13157   }
13158 
13159   RetTyParams.push_back(RetTyElement);
13160   RetTyParams.push_back(RetTyElement);
13161   ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
13162   Type *RetTy = StructType::get(*DAG.getContext(), ret);
13163 
13164   RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
13165                                                              SimpleTy);
13166   SDValue InChain = DAG.getEntryNode();
13167   TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
13168                                                     Subtarget);
13169   bool isSigned = N->getOpcode() == ISD::SREM;
13170   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
13171                                          getPointerTy(DAG.getDataLayout()));
13172 
13173   if (Subtarget->isTargetWindows())
13174     InChain = WinDBZCheckDenominator(DAG, N, InChain);
13175 
13176   // Lower call
13177   CallLoweringInfo CLI(DAG);
13178   CLI.setChain(InChain)
13179      .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
13180      .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
13181   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
13182 
13183   // Return second (rem) result operand (first contains div)
13184   SDNode *ResNode = CallResult.first.getNode();
13185   assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
13186   return ResNode->getOperand(1);
13187 }
13188 
13189 SDValue
13190 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
13191   assert(Subtarget->isTargetWindows() && "unsupported target platform");
13192   SDLoc DL(Op);
13193 
13194   // Get the inputs.
13195   SDValue Chain = Op.getOperand(0);
13196   SDValue Size  = Op.getOperand(1);
13197 
13198   SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
13199                               DAG.getConstant(2, DL, MVT::i32));
13200 
13201   SDValue Flag;
13202   Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
13203   Flag = Chain.getValue(1);
13204 
13205   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13206   Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);
13207 
13208   SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
13209   Chain = NewSP.getValue(1);
13210 
13211   SDValue Ops[2] = { NewSP, Chain };
13212   return DAG.getMergeValues(Ops, DL);
13213 }
13214 
13215 SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
13216   assert(Op.getValueType() == MVT::f64 && Subtarget->isFPOnlySP() &&
13217          "Unexpected type for custom-lowering FP_EXTEND");
13218 
13219   RTLIB::Libcall LC;
13220   LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());
13221 
13222   SDValue SrcVal = Op.getOperand(0);
13223   return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
13224                      SDLoc(Op)).first;
13225 }
13226 
13227 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
13228   assert(Op.getOperand(0).getValueType() == MVT::f64 &&
13229          Subtarget->isFPOnlySP() &&
13230          "Unexpected type for custom-lowering FP_ROUND");
13231 
13232   RTLIB::Libcall LC;
13233   LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
13234 
13235   SDValue SrcVal = Op.getOperand(0);
13236   return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
13237                      SDLoc(Op)).first;
13238 }
13239 
13240 bool
13241 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
13242   // The ARM target isn't yet aware of offsets.
13243   return false;
13244 }
13245 
13246 bool ARM::isBitFieldInvertedMask(unsigned v) {
13247   if (v == 0xffffffff)
13248     return false;
13249 
13250   // there can be 1's on either or both "outsides", all the "inside"
13251   // bits must be 0's
13252   return isShiftedMask_32(~v);
13253 }
13254 
13255 /// isFPImmLegal - Returns true if the target can instruction select the
13256 /// specified FP immediate natively. If false, the legalizer will
13257 /// materialize the FP immediate as a load from a constant pool.
13258 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
13259   if (!Subtarget->hasVFP3())
13260     return false;
13261   if (VT == MVT::f32)
13262     return ARM_AM::getFP32Imm(Imm) != -1;
13263   if (VT == MVT::f64 && !Subtarget->isFPOnlySP())
13264     return ARM_AM::getFP64Imm(Imm) != -1;
13265   return false;
13266 }
13267 
13268 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
13269 /// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
13270 /// specified in the intrinsic calls.
13271 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
13272                                            const CallInst &I,
13273                                            unsigned Intrinsic) const {
13274   switch (Intrinsic) {
13275   case Intrinsic::arm_neon_vld1:
13276   case Intrinsic::arm_neon_vld2:
13277   case Intrinsic::arm_neon_vld3:
13278   case Intrinsic::arm_neon_vld4:
13279   case Intrinsic::arm_neon_vld2lane:
13280   case Intrinsic::arm_neon_vld3lane:
13281   case Intrinsic::arm_neon_vld4lane: {
13282     Info.opc = ISD::INTRINSIC_W_CHAIN;
13283     // Conservatively set memVT to the entire set of vectors loaded.
13284     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
13285     uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
13286     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
13287     Info.ptrVal = I.getArgOperand(0);
13288     Info.offset = 0;
13289     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
13290     Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
13291     Info.vol = false; // volatile loads with NEON intrinsics not supported
13292     Info.readMem = true;
13293     Info.writeMem = false;
13294     return true;
13295   }
13296   case Intrinsic::arm_neon_vst1:
13297   case Intrinsic::arm_neon_vst2:
13298   case Intrinsic::arm_neon_vst3:
13299   case Intrinsic::arm_neon_vst4:
13300   case Intrinsic::arm_neon_vst2lane:
13301   case Intrinsic::arm_neon_vst3lane:
13302   case Intrinsic::arm_neon_vst4lane: {
13303     Info.opc = ISD::INTRINSIC_VOID;
13304     // Conservatively set memVT to the entire set of vectors stored.
13305     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
13306     unsigned NumElts = 0;
13307     for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
13308       Type *ArgTy = I.getArgOperand(ArgI)->getType();
13309       if (!ArgTy->isVectorTy())
13310         break;
13311       NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
13312     }
13313     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
13314     Info.ptrVal = I.getArgOperand(0);
13315     Info.offset = 0;
13316     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
13317     Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
13318     Info.vol = false; // volatile stores with NEON intrinsics not supported
13319     Info.readMem = false;
13320     Info.writeMem = true;
13321     return true;
13322   }
13323   case Intrinsic::arm_ldaex:
13324   case Intrinsic::arm_ldrex: {
13325     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
13326     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
13327     Info.opc = ISD::INTRINSIC_W_CHAIN;
13328     Info.memVT = MVT::getVT(PtrTy->getElementType());
13329     Info.ptrVal = I.getArgOperand(0);
13330     Info.offset = 0;
13331     Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
13332     Info.vol = true;
13333     Info.readMem = true;
13334     Info.writeMem = false;
13335     return true;
13336   }
13337   case Intrinsic::arm_stlex:
13338   case Intrinsic::arm_strex: {
13339     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
13340     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
13341     Info.opc = ISD::INTRINSIC_W_CHAIN;
13342     Info.memVT = MVT::getVT(PtrTy->getElementType());
13343     Info.ptrVal = I.getArgOperand(1);
13344     Info.offset = 0;
13345     Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
13346     Info.vol = true;
13347     Info.readMem = false;
13348     Info.writeMem = true;
13349     return true;
13350   }
13351   case Intrinsic::arm_stlexd:
13352   case Intrinsic::arm_strexd:
13353     Info.opc = ISD::INTRINSIC_W_CHAIN;
13354     Info.memVT = MVT::i64;
13355     Info.ptrVal = I.getArgOperand(2);
13356     Info.offset = 0;
13357     Info.align = 8;
13358     Info.vol = true;
13359     Info.readMem = false;
13360     Info.writeMem = true;
13361     return true;
13362 
13363   case Intrinsic::arm_ldaexd:
13364   case Intrinsic::arm_ldrexd:
13365     Info.opc = ISD::INTRINSIC_W_CHAIN;
13366     Info.memVT = MVT::i64;
13367     Info.ptrVal = I.getArgOperand(0);
13368     Info.offset = 0;
13369     Info.align = 8;
13370     Info.vol = true;
13371     Info.readMem = true;
13372     Info.writeMem = false;
13373     return true;
13374 
13375   default:
13376     break;
13377   }
13378 
13379   return false;
13380 }
13381 
13382 /// \brief Returns true if it is beneficial to convert a load of a constant
13383 /// to just the constant itself.
13384 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
13385                                                           Type *Ty) const {
13386   assert(Ty->isIntegerTy());
13387 
13388   unsigned Bits = Ty->getPrimitiveSizeInBits();
13389   if (Bits == 0 || Bits > 32)
13390     return false;
13391   return true;
13392 }
13393 
13394 bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT,
13395                                                 unsigned Index) const {
13396   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
13397     return false;
13398 
13399   return (Index == 0 || Index == ResVT.getVectorNumElements());
13400 }
13401 
13402 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
13403                                         ARM_MB::MemBOpt Domain) const {
13404   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
13405 
13406   // First, if the target has no DMB, see what fallback we can use.
13407   if (!Subtarget->hasDataBarrier()) {
13408     // Some ARMv6 cpus can support data barriers with an mcr instruction.
13409     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
13410     // here.
13411     if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
13412       Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
13413       Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
13414                         Builder.getInt32(0), Builder.getInt32(7),
13415                         Builder.getInt32(10), Builder.getInt32(5)};
13416       return Builder.CreateCall(MCR, args);
13417     } else {
13418       // Instead of using barriers, atomic accesses on these subtargets use
13419       // libcalls.
13420       llvm_unreachable("makeDMB on a target so old that it has no barriers");
13421     }
13422   } else {
13423     Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
13424     // Only a full system barrier exists in the M-class architectures.
13425     Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
13426     Constant *CDomain = Builder.getInt32(Domain);
13427     return Builder.CreateCall(DMB, CDomain);
13428   }
13429 }
13430 
13431 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
13432 Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
13433                                                  Instruction *Inst,
13434                                                  AtomicOrdering Ord) const {
13435   switch (Ord) {
13436   case AtomicOrdering::NotAtomic:
13437   case AtomicOrdering::Unordered:
13438     llvm_unreachable("Invalid fence: unordered/non-atomic");
13439   case AtomicOrdering::Monotonic:
13440   case AtomicOrdering::Acquire:
13441     return nullptr; // Nothing to do
13442   case AtomicOrdering::SequentiallyConsistent:
13443     if (!Inst->hasAtomicStore())
13444       return nullptr; // Nothing to do
13445     /*FALLTHROUGH*/
13446   case AtomicOrdering::Release:
13447   case AtomicOrdering::AcquireRelease:
13448     if (Subtarget->preferISHSTBarriers())
13449       return makeDMB(Builder, ARM_MB::ISHST);
13450     // FIXME: add a comment with a link to documentation justifying this.
13451     else
13452       return makeDMB(Builder, ARM_MB::ISH);
13453   }
13454   llvm_unreachable("Unknown fence ordering in emitLeadingFence");
13455 }
13456 
13457 Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
13458                                                   Instruction *Inst,
13459                                                   AtomicOrdering Ord) const {
13460   switch (Ord) {
13461   case AtomicOrdering::NotAtomic:
13462   case AtomicOrdering::Unordered:
13463     llvm_unreachable("Invalid fence: unordered/not-atomic");
13464   case AtomicOrdering::Monotonic:
13465   case AtomicOrdering::Release:
13466     return nullptr; // Nothing to do
13467   case AtomicOrdering::Acquire:
13468   case AtomicOrdering::AcquireRelease:
13469   case AtomicOrdering::SequentiallyConsistent:
13470     return makeDMB(Builder, ARM_MB::ISH);
13471   }
13472   llvm_unreachable("Unknown fence ordering in emitTrailingFence");
13473 }
13474 
13475 // Loads and stores less than 64-bits are already atomic; ones above that
13476 // are doomed anyway, so defer to the default libcall and blame the OS when
13477 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
13478 // anything for those.
13479 bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
13480   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
13481   return (Size == 64) && !Subtarget->isMClass();
13482 }
13483 
13484 // Loads and stores less than 64-bits are already atomic; ones above that
13485 // are doomed anyway, so defer to the default libcall and blame the OS when
13486 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
13487 // anything for those.
13488 // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
13489 // guarantee, see DDI0406C ARM architecture reference manual,
13490 // sections A8.8.72-74 LDRD)
13491 TargetLowering::AtomicExpansionKind
13492 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
13493   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
13494   return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
13495                                                   : AtomicExpansionKind::None;
13496 }
13497 
13498 // For the real atomic operations, we have ldrex/strex up to 32 bits,
13499 // and up to 64 bits on the non-M profiles
13500 TargetLowering::AtomicExpansionKind
13501 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
13502   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
13503   bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
13504   return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
13505              ? AtomicExpansionKind::LLSC
13506              : AtomicExpansionKind::None;
13507 }
13508 
13509 bool ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(
13510     AtomicCmpXchgInst *AI) const {
13511   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
13512   // implement cmpxchg without spilling. If the address being exchanged is also
13513   // on the stack and close enough to the spill slot, this can lead to a
13514   // situation where the monitor always gets cleared and the atomic operation
13515   // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
13516   bool hasAtomicCmpXchg =
13517       !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
13518   return getTargetMachine().getOptLevel() != 0 && hasAtomicCmpXchg;
13519 }
13520 
13521 bool ARMTargetLowering::shouldInsertFencesForAtomic(
13522     const Instruction *I) const {
13523   return InsertFencesForAtomic;
13524 }
13525 
13526 // This has so far only been implemented for MachO.
13527 bool ARMTargetLowering::useLoadStackGuardNode() const {
13528   return Subtarget->isTargetMachO();
13529 }
13530 
13531 bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
13532                                                   unsigned &Cost) const {
13533   // If we do not have NEON, vector types are not natively supported.
13534   if (!Subtarget->hasNEON())
13535     return false;
13536 
13537   // Floating point values and vector values map to the same register file.
13538   // Therefore, although we could do a store extract of a vector type, this is
13539   // better to leave at float as we have more freedom in the addressing mode for
13540   // those.
13541   if (VectorTy->isFPOrFPVectorTy())
13542     return false;
13543 
13544   // If the index is unknown at compile time, this is very expensive to lower
13545   // and it is not possible to combine the store with the extract.
13546   if (!isa<ConstantInt>(Idx))
13547     return false;
13548 
13549   assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
13550   unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth();
13551   // We can do a store + vector extract on any vector that fits perfectly in a D
13552   // or Q register.
13553   if (BitWidth == 64 || BitWidth == 128) {
13554     Cost = 0;
13555     return true;
13556   }
13557   return false;
13558 }
13559 
13560 bool ARMTargetLowering::isCheapToSpeculateCttz() const {
13561   return Subtarget->hasV6T2Ops();
13562 }
13563 
13564 bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
13565   return Subtarget->hasV6T2Ops();
13566 }
13567 
13568 Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
13569                                          AtomicOrdering Ord) const {
13570   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
13571   Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
13572   bool IsAcquire = isAcquireOrStronger(Ord);
13573 
13574   // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
13575   // intrinsic must return {i32, i32} and we have to recombine them into a
13576   // single i64 here.
13577   if (ValTy->getPrimitiveSizeInBits() == 64) {
13578     Intrinsic::ID Int =
13579         IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
13580     Function *Ldrex = Intrinsic::getDeclaration(M, Int);
13581 
13582     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
13583     Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
13584 
13585     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
13586     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
13587     if (!Subtarget->isLittle())
13588       std::swap (Lo, Hi);
13589     Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
13590     Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
13591     return Builder.CreateOr(
13592         Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
13593   }
13594 
13595   Type *Tys[] = { Addr->getType() };
13596   Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
13597   Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);
13598 
13599   return Builder.CreateTruncOrBitCast(
13600       Builder.CreateCall(Ldrex, Addr),
13601       cast<PointerType>(Addr->getType())->getElementType());
13602 }
13603 
13604 void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
13605     IRBuilder<> &Builder) const {
13606   if (!Subtarget->hasV7Ops())
13607     return;
13608   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
13609   Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
13610 }
13611 
13612 Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
13613                                                Value *Addr,
13614                                                AtomicOrdering Ord) const {
13615   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
13616   bool IsRelease = isReleaseOrStronger(Ord);
13617 
13618   // Since the intrinsics must have legal type, the i64 intrinsics take two
13619   // parameters: "i32, i32". We must marshal Val into the appropriate form
13620   // before the call.
13621   if (Val->getType()->getPrimitiveSizeInBits() == 64) {
13622     Intrinsic::ID Int =
13623         IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
13624     Function *Strex = Intrinsic::getDeclaration(M, Int);
13625     Type *Int32Ty = Type::getInt32Ty(M->getContext());
13626 
13627     Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
13628     Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
13629     if (!Subtarget->isLittle())
13630       std::swap (Lo, Hi);
13631     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
13632     return Builder.CreateCall(Strex, {Lo, Hi, Addr});
13633   }
13634 
13635   Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
13636   Type *Tys[] = { Addr->getType() };
13637   Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
13638 
13639   return Builder.CreateCall(
13640       Strex, {Builder.CreateZExtOrBitCast(
13641                   Val, Strex->getFunctionType()->getParamType(0)),
13642               Addr});
13643 }
13644 
13645 /// A helper function for determining the number of interleaved accesses we
13646 /// will generate when lowering accesses of the given type.
13647 unsigned
13648 ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
13649                                              const DataLayout &DL) const {
13650   return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
13651 }
13652 
13653 bool ARMTargetLowering::isLegalInterleavedAccessType(
13654     VectorType *VecTy, const DataLayout &DL) const {
13655 
13656   unsigned VecSize = DL.getTypeSizeInBits(VecTy);
13657   unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
13658 
13659   // Ensure the vector doesn't have f16 elements. Even though we could do an
13660   // i16 vldN, we can't hold the f16 vectors and will end up converting via
13661   // f32.
13662   if (VecTy->getElementType()->isHalfTy())
13663     return false;
13664 
13665   // Ensure the number of vector elements is greater than 1.
13666   if (VecTy->getNumElements() < 2)
13667     return false;
13668 
13669   // Ensure the element type is legal.
13670   if (ElSize != 8 && ElSize != 16 && ElSize != 32)
13671     return false;
13672 
13673   // Ensure the total vector size is 64 or a multiple of 128. Types larger than
13674   // 128 will be split into multiple interleaved accesses.
13675   return VecSize == 64 || VecSize % 128 == 0;
13676 }
13677 
13678 /// \brief Lower an interleaved load into a vldN intrinsic.
13679 ///
13680 /// E.g. Lower an interleaved load (Factor = 2):
13681 ///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
13682 ///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
13683 ///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
13684 ///
13685 ///      Into:
13686 ///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
13687 ///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
13688 ///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
13689 bool ARMTargetLowering::lowerInterleavedLoad(
13690     LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
13691     ArrayRef<unsigned> Indices, unsigned Factor) const {
13692   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
13693          "Invalid interleave factor");
13694   assert(!Shuffles.empty() && "Empty shufflevector input");
13695   assert(Shuffles.size() == Indices.size() &&
13696          "Unmatched number of shufflevectors and indices");
13697 
13698   VectorType *VecTy = Shuffles[0]->getType();
13699   Type *EltTy = VecTy->getVectorElementType();
13700 
13701   const DataLayout &DL = LI->getModule()->getDataLayout();
13702 
13703   // Skip if we do not have NEON and skip illegal vector types. We can
13704   // "legalize" wide vector types into multiple interleaved accesses as long as
13705   // the vector types are divisible by 128.
13706   if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(VecTy, DL))
13707     return false;
13708 
13709   unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);
13710 
13711   // A pointer vector can not be the return type of the ldN intrinsics. Need to
13712   // load integer vectors first and then convert to pointer vectors.
13713   if (EltTy->isPointerTy())
13714     VecTy =
13715         VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());
13716 
13717   IRBuilder<> Builder(LI);
13718 
13719   // The base address of the load.
13720   Value *BaseAddr = LI->getPointerOperand();
13721 
13722   if (NumLoads > 1) {
13723     // If we're going to generate more than one load, reset the sub-vector type
13724     // to something legal.
13725     VecTy = VectorType::get(VecTy->getVectorElementType(),
13726                             VecTy->getVectorNumElements() / NumLoads);
13727 
13728     // We will compute the pointer operand of each load from the original base
13729     // address using GEPs. Cast the base address to a pointer to the scalar
13730     // element type.
13731     BaseAddr = Builder.CreateBitCast(
13732         BaseAddr, VecTy->getVectorElementType()->getPointerTo(
13733                       LI->getPointerAddressSpace()));
13734   }
13735 
13736   assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");
13737 
13738   Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
13739   Type *Tys[] = {VecTy, Int8Ptr};
13740   static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
13741                                             Intrinsic::arm_neon_vld3,
13742                                             Intrinsic::arm_neon_vld4};
13743   Function *VldnFunc =
13744       Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
13745 
13746   // Holds sub-vectors extracted from the load intrinsic return values. The
13747   // sub-vectors are associated with the shufflevector instructions they will
13748   // replace.
13749   DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
13750 
13751   for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
13752 
13753     // If we're generating more than one load, compute the base address of
13754     // subsequent loads as an offset from the previous.
13755     if (LoadCount > 0)
13756       BaseAddr = Builder.CreateConstGEP1_32(
13757           BaseAddr, VecTy->getVectorNumElements() * Factor);
13758 
13759     SmallVector<Value *, 2> Ops;
13760     Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
13761     Ops.push_back(Builder.getInt32(LI->getAlignment()));
13762 
13763     CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN");
13764 
13765     // Replace uses of each shufflevector with the corresponding vector loaded
13766     // by ldN.
13767     for (unsigned i = 0; i < Shuffles.size(); i++) {
13768       ShuffleVectorInst *SV = Shuffles[i];
13769       unsigned Index = Indices[i];
13770 
13771       Value *SubVec = Builder.CreateExtractValue(VldN, Index);
13772 
13773       // Convert the integer vector to pointer vector if the element is pointer.
13774       if (EltTy->isPointerTy())
13775         SubVec = Builder.CreateIntToPtr(SubVec, SV->getType());
13776 
13777       SubVecs[SV].push_back(SubVec);
13778     }
13779   }
13780 
13781   // Replace uses of the shufflevector instructions with the sub-vectors
13782   // returned by the load intrinsic. If a shufflevector instruction is
13783   // associated with more than one sub-vector, those sub-vectors will be
13784   // concatenated into a single wide vector.
13785   for (ShuffleVectorInst *SVI : Shuffles) {
13786     auto &SubVec = SubVecs[SVI];
13787     auto *WideVec =
13788         SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
13789     SVI->replaceAllUsesWith(WideVec);
13790   }
13791 
13792   return true;
13793 }
13794 
13795 /// \brief Lower an interleaved store into a vstN intrinsic.
13796 ///
13797 /// E.g. Lower an interleaved store (Factor = 3):
13798 ///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
13799 ///                                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
13800 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
13801 ///
13802 ///      Into:
13803 ///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
13804 ///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
13805 ///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
13806 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
13807 ///
13808 /// Note that the new shufflevectors will be removed and we'll only generate one
13809 /// vst3 instruction in CodeGen.
13810 ///
13811 /// Example for a more general valid mask (Factor 3). Lower:
13812 ///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
13813 ///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
13814 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
13815 ///
13816 ///      Into:
13817 ///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
13818 ///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
13819 ///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
13820 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
13821 bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
13822                                               ShuffleVectorInst *SVI,
13823                                               unsigned Factor) const {
13824   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
13825          "Invalid interleave factor");
13826 
13827   VectorType *VecTy = SVI->getType();
13828   assert(VecTy->getVectorNumElements() % Factor == 0 &&
13829          "Invalid interleaved store");
13830 
13831   unsigned LaneLen = VecTy->getVectorNumElements() / Factor;
13832   Type *EltTy = VecTy->getVectorElementType();
13833   VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);
13834 
13835   const DataLayout &DL = SI->getModule()->getDataLayout();
13836 
13837   // Skip if we do not have NEON and skip illegal vector types. We can
13838   // "legalize" wide vector types into multiple interleaved accesses as long as
13839   // the vector types are divisible by 128.
13840   if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(SubVecTy, DL))
13841     return false;
13842 
13843   unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);
13844 
13845   Value *Op0 = SVI->getOperand(0);
13846   Value *Op1 = SVI->getOperand(1);
13847   IRBuilder<> Builder(SI);
13848 
13849   // StN intrinsics don't support pointer vectors as arguments. Convert pointer
13850   // vectors to integer vectors.
13851   if (EltTy->isPointerTy()) {
13852     Type *IntTy = DL.getIntPtrType(EltTy);
13853 
13854     // Convert to the corresponding integer vector.
13855     Type *IntVecTy =
13856         VectorType::get(IntTy, Op0->getType()->getVectorNumElements());
13857     Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
13858     Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
13859 
13860     SubVecTy = VectorType::get(IntTy, LaneLen);
13861   }
13862 
13863   // The base address of the store.
13864   Value *BaseAddr = SI->getPointerOperand();
13865 
13866   if (NumStores > 1) {
13867     // If we're going to generate more than one store, reset the lane length
13868     // and sub-vector type to something legal.
13869     LaneLen /= NumStores;
13870     SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen);
13871 
13872     // We will compute the pointer operand of each store from the original base
13873     // address using GEPs. Cast the base address to a pointer to the scalar
13874     // element type.
13875     BaseAddr = Builder.CreateBitCast(
13876         BaseAddr, SubVecTy->getVectorElementType()->getPointerTo(
13877                       SI->getPointerAddressSpace()));
13878   }
13879 
13880   assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");
13881 
13882   auto Mask = SVI->getShuffleMask();
13883 
13884   Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
13885   Type *Tys[] = {Int8Ptr, SubVecTy};
13886   static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
13887                                              Intrinsic::arm_neon_vst3,
13888                                              Intrinsic::arm_neon_vst4};
13889 
13890   for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
13891 
13892     // If we generating more than one store, we compute the base address of
13893     // subsequent stores as an offset from the previous.
13894     if (StoreCount > 0)
13895       BaseAddr = Builder.CreateConstGEP1_32(BaseAddr, LaneLen * Factor);
13896 
13897     SmallVector<Value *, 6> Ops;
13898     Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
13899 
13900     Function *VstNFunc =
13901         Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys);
13902 
13903     // Split the shufflevector operands into sub vectors for the new vstN call.
13904     for (unsigned i = 0; i < Factor; i++) {
13905       unsigned IdxI = StoreCount * LaneLen * Factor + i;
13906       if (Mask[IdxI] >= 0) {
13907         Ops.push_back(Builder.CreateShuffleVector(
13908             Op0, Op1, createSequentialMask(Builder, Mask[IdxI], LaneLen, 0)));
13909       } else {
13910         unsigned StartMask = 0;
13911         for (unsigned j = 1; j < LaneLen; j++) {
13912           unsigned IdxJ = StoreCount * LaneLen * Factor + j;
13913           if (Mask[IdxJ * Factor + IdxI] >= 0) {
13914             StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
13915             break;
13916           }
13917         }
13918         // Note: If all elements in a chunk are undefs, StartMask=0!
13919         // Note: Filling undef gaps with random elements is ok, since
13920         // those elements were being written anyway (with undefs).
13921         // In the case of all undefs we're defaulting to using elems from 0
13922         // Note: StartMask cannot be negative, it's checked in
13923         // isReInterleaveMask
13924         Ops.push_back(Builder.CreateShuffleVector(
13925             Op0, Op1, createSequentialMask(Builder, StartMask, LaneLen, 0)));
13926       }
13927     }
13928 
13929     Ops.push_back(Builder.getInt32(SI->getAlignment()));
13930     Builder.CreateCall(VstNFunc, Ops);
13931   }
13932   return true;
13933 }
13934 
13935 enum HABaseType {
13936   HA_UNKNOWN = 0,
13937   HA_FLOAT,
13938   HA_DOUBLE,
13939   HA_VECT64,
13940   HA_VECT128
13941 };
13942 
13943 static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
13944                                    uint64_t &Members) {
13945   if (auto *ST = dyn_cast<StructType>(Ty)) {
13946     for (unsigned i = 0; i < ST->getNumElements(); ++i) {
13947       uint64_t SubMembers = 0;
13948       if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
13949         return false;
13950       Members += SubMembers;
13951     }
13952   } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
13953     uint64_t SubMembers = 0;
13954     if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
13955       return false;
13956     Members += SubMembers * AT->getNumElements();
13957   } else if (Ty->isFloatTy()) {
13958     if (Base != HA_UNKNOWN && Base != HA_FLOAT)
13959       return false;
13960     Members = 1;
13961     Base = HA_FLOAT;
13962   } else if (Ty->isDoubleTy()) {
13963     if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
13964       return false;
13965     Members = 1;
13966     Base = HA_DOUBLE;
13967   } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
13968     Members = 1;
13969     switch (Base) {
13970     case HA_FLOAT:
13971     case HA_DOUBLE:
13972       return false;
13973     case HA_VECT64:
13974       return VT->getBitWidth() == 64;
13975     case HA_VECT128:
13976       return VT->getBitWidth() == 128;
13977     case HA_UNKNOWN:
13978       switch (VT->getBitWidth()) {
13979       case 64:
13980         Base = HA_VECT64;
13981         return true;
13982       case 128:
13983         Base = HA_VECT128;
13984         return true;
13985       default:
13986         return false;
13987       }
13988     }
13989   }
13990 
13991   return (Members > 0 && Members <= 4);
13992 }
13993 
13994 /// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
13995 /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
13996 /// passing according to AAPCS rules.
13997 bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
13998     Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
13999   if (getEffectiveCallingConv(CallConv, isVarArg) !=
14000       CallingConv::ARM_AAPCS_VFP)
14001     return false;
14002 
14003   HABaseType Base = HA_UNKNOWN;
14004   uint64_t Members = 0;
14005   bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
14006   DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
14007 
14008   bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
14009   return IsHA || IsIntArray;
14010 }
14011 
14012 unsigned ARMTargetLowering::getExceptionPointerRegister(
14013     const Constant *PersonalityFn) const {
14014   // Platforms which do not use SjLj EH may return values in these registers
14015   // via the personality function.
14016   return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0;
14017 }
14018 
14019 unsigned ARMTargetLowering::getExceptionSelectorRegister(
14020     const Constant *PersonalityFn) const {
14021   // Platforms which do not use SjLj EH may return values in these registers
14022   // via the personality function.
14023   return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1;
14024 }
14025 
14026 void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
14027   // Update IsSplitCSR in ARMFunctionInfo.
14028   ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
14029   AFI->setIsSplitCSR(true);
14030 }
14031 
14032 void ARMTargetLowering::insertCopiesSplitCSR(
14033     MachineBasicBlock *Entry,
14034     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
14035   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
14036   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
14037   if (!IStart)
14038     return;
14039 
14040   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
14041   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
14042   MachineBasicBlock::iterator MBBI = Entry->begin();
14043   for (const MCPhysReg *I = IStart; *I; ++I) {
14044     const TargetRegisterClass *RC = nullptr;
14045     if (ARM::GPRRegClass.contains(*I))
14046       RC = &ARM::GPRRegClass;
14047     else if (ARM::DPRRegClass.contains(*I))
14048       RC = &ARM::DPRRegClass;
14049     else
14050       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
14051 
14052     unsigned NewVR = MRI->createVirtualRegister(RC);
14053     // Create copy from CSR to a virtual register.
14054     // FIXME: this currently does not emit CFI pseudo-instructions, it works
14055     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
14056     // nounwind. If we want to generalize this later, we may need to emit
14057     // CFI pseudo-instructions.
14058     assert(Entry->getParent()->getFunction()->hasFnAttribute(
14059                Attribute::NoUnwind) &&
14060            "Function should be nounwind in insertCopiesSplitCSR!");
14061     Entry->addLiveIn(*I);
14062     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
14063         .addReg(*I);
14064 
14065     // Insert the copy-back instructions right before the terminator.
14066     for (auto *Exit : Exits)
14067       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
14068               TII->get(TargetOpcode::COPY), *I)
14069           .addReg(NewVR);
14070   }
14071 }
14072 
14073 void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
14074   MF.getFrameInfo().computeMaxCallFrameSize(MF);
14075   TargetLoweringBase::finalizeLowering(MF);
14076 }
14077