1 //===- ARMFastISel.cpp - ARM FastISel implementation ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the ARM-specific support for the FastISel class. Some
10 // of the target-specific code is generated by tablegen in the file
11 // ARMGenFastISel.inc, which is #included here.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "ARM.h"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMBaseRegisterInfo.h"
18 #include "ARMCallingConv.h"
19 #include "ARMConstantPoolValue.h"
20 #include "ARMISelLowering.h"
21 #include "ARMMachineFunctionInfo.h"
22 #include "ARMSubtarget.h"
23 #include "MCTargetDesc/ARMAddressingModes.h"
24 #include "MCTargetDesc/ARMBaseInfo.h"
25 #include "Utils/ARMBaseInfo.h"
26 #include "llvm/ADT/APFloat.h"
27 #include "llvm/ADT/APInt.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/CodeGen/CallingConvLower.h"
31 #include "llvm/CodeGen/FastISel.h"
32 #include "llvm/CodeGen/FunctionLoweringInfo.h"
33 #include "llvm/CodeGen/ISDOpcodes.h"
34 #include "llvm/CodeGen/MachineBasicBlock.h"
35 #include "llvm/CodeGen/MachineConstantPool.h"
36 #include "llvm/CodeGen/MachineFrameInfo.h"
37 #include "llvm/CodeGen/MachineFunction.h"
38 #include "llvm/CodeGen/MachineInstr.h"
39 #include "llvm/CodeGen/MachineInstrBuilder.h"
40 #include "llvm/CodeGen/MachineMemOperand.h"
41 #include "llvm/CodeGen/MachineOperand.h"
42 #include "llvm/CodeGen/MachineRegisterInfo.h"
43 #include "llvm/CodeGen/RuntimeLibcalls.h"
44 #include "llvm/CodeGen/TargetInstrInfo.h"
45 #include "llvm/CodeGen/TargetLowering.h"
46 #include "llvm/CodeGen/TargetOpcodes.h"
47 #include "llvm/CodeGen/TargetRegisterInfo.h"
48 #include "llvm/CodeGen/ValueTypes.h"
49 #include "llvm/IR/Argument.h"
50 #include "llvm/IR/Attributes.h"
51 #include "llvm/IR/CallingConv.h"
52 #include "llvm/IR/Constant.h"
53 #include "llvm/IR/Constants.h"
54 #include "llvm/IR/DataLayout.h"
55 #include "llvm/IR/DerivedTypes.h"
56 #include "llvm/IR/Function.h"
57 #include "llvm/IR/GetElementPtrTypeIterator.h"
58 #include "llvm/IR/GlobalValue.h"
59 #include "llvm/IR/GlobalVariable.h"
60 #include "llvm/IR/InstrTypes.h"
61 #include "llvm/IR/Instruction.h"
62 #include "llvm/IR/Instructions.h"
63 #include "llvm/IR/IntrinsicInst.h"
64 #include "llvm/IR/Intrinsics.h"
65 #include "llvm/IR/Module.h"
66 #include "llvm/IR/Operator.h"
67 #include "llvm/IR/Type.h"
68 #include "llvm/IR/User.h"
69 #include "llvm/IR/Value.h"
70 #include "llvm/MC/MCInstrDesc.h"
71 #include "llvm/MC/MCRegisterInfo.h"
72 #include "llvm/Support/Casting.h"
73 #include "llvm/Support/Compiler.h"
74 #include "llvm/Support/ErrorHandling.h"
75 #include "llvm/Support/MachineValueType.h"
76 #include "llvm/Support/MathExtras.h"
77 #include "llvm/Target/TargetMachine.h"
78 #include "llvm/Target/TargetOptions.h"
79 #include <cassert>
80 #include <cstdint>
81 #include <utility>
82 
83 using namespace llvm;
84 
85 namespace {
86 
87   // All possible address modes, plus some.
88   struct Address {
89     enum {
90       RegBase,
91       FrameIndexBase
92     } BaseType = RegBase;
93 
94     union {
95       unsigned Reg;
96       int FI;
97     } Base;
98 
99     int Offset = 0;
100 
101     // Innocuous defaults for our address.
102     Address() {
103       Base.Reg = 0;
104     }
105   };
106 
107 class ARMFastISel final : public FastISel {
108   /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
109   /// make the right decision when generating code for different targets.
110   const ARMSubtarget *Subtarget;
111   Module &M;
112   const TargetMachine &TM;
113   const TargetInstrInfo &TII;
114   const TargetLowering &TLI;
115   ARMFunctionInfo *AFI;
116 
117   // Convenience variables to avoid some queries.
118   bool isThumb2;
119   LLVMContext *Context;
120 
121   public:
122     explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
123                          const TargetLibraryInfo *libInfo)
124         : FastISel(funcInfo, libInfo),
125           Subtarget(
126               &static_cast<const ARMSubtarget &>(funcInfo.MF->getSubtarget())),
127           M(const_cast<Module &>(*funcInfo.Fn->getParent())),
128           TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
129           TLI(*Subtarget->getTargetLowering()) {
130       AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
131       isThumb2 = AFI->isThumbFunction();
132       Context = &funcInfo.Fn->getContext();
133     }
134 
135   private:
136     // Code from FastISel.cpp.
137 
138     unsigned fastEmitInst_r(unsigned MachineInstOpcode,
139                             const TargetRegisterClass *RC,
140                             unsigned Op0, bool Op0IsKill);
141     unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
142                              const TargetRegisterClass *RC,
143                              unsigned Op0, bool Op0IsKill,
144                              unsigned Op1, bool Op1IsKill);
145     unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
146                              const TargetRegisterClass *RC,
147                              unsigned Op0, bool Op0IsKill,
148                              uint64_t Imm);
149     unsigned fastEmitInst_i(unsigned MachineInstOpcode,
150                             const TargetRegisterClass *RC,
151                             uint64_t Imm);
152 
153     // Backend specific FastISel code.
154 
155     bool fastSelectInstruction(const Instruction *I) override;
156     unsigned fastMaterializeConstant(const Constant *C) override;
157     unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
158     bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
159                              const LoadInst *LI) override;
160     bool fastLowerArguments() override;
161 
162   #include "ARMGenFastISel.inc"
163 
164     // Instruction selection routines.
165 
166     bool SelectLoad(const Instruction *I);
167     bool SelectStore(const Instruction *I);
168     bool SelectBranch(const Instruction *I);
169     bool SelectIndirectBr(const Instruction *I);
170     bool SelectCmp(const Instruction *I);
171     bool SelectFPExt(const Instruction *I);
172     bool SelectFPTrunc(const Instruction *I);
173     bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
174     bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
175     bool SelectIToFP(const Instruction *I, bool isSigned);
176     bool SelectFPToI(const Instruction *I, bool isSigned);
177     bool SelectDiv(const Instruction *I, bool isSigned);
178     bool SelectRem(const Instruction *I, bool isSigned);
179     bool SelectCall(const Instruction *I, const char *IntrMemName);
180     bool SelectIntrinsicCall(const IntrinsicInst &I);
181     bool SelectSelect(const Instruction *I);
182     bool SelectRet(const Instruction *I);
183     bool SelectTrunc(const Instruction *I);
184     bool SelectIntExt(const Instruction *I);
185     bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);
186 
187     // Utility routines.
188 
189     bool isPositionIndependent() const;
190     bool isTypeLegal(Type *Ty, MVT &VT);
191     bool isLoadTypeLegal(Type *Ty, MVT &VT);
192     bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
193                     bool isZExt);
194     bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
195                      unsigned Alignment = 0, bool isZExt = true,
196                      bool allocReg = true);
197     bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
198                       unsigned Alignment = 0);
199     bool ARMComputeAddress(const Value *Obj, Address &Addr);
200     void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
201     bool ARMIsMemCpySmall(uint64_t Len);
202     bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
203                                unsigned Alignment);
204     unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
205     unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
206     unsigned ARMMaterializeInt(const Constant *C, MVT VT);
207     unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
208     unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
209     unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
210     unsigned ARMSelectCallOp(bool UseReg);
211     unsigned ARMLowerPICELF(const GlobalValue *GV, MVT VT);
212 
213     const TargetLowering *getTargetLowering() { return &TLI; }
214 
215     // Call handling routines.
216 
217     CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
218                                   bool Return,
219                                   bool isVarArg);
220     bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
221                          SmallVectorImpl<Register> &ArgRegs,
222                          SmallVectorImpl<MVT> &ArgVTs,
223                          SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
224                          SmallVectorImpl<Register> &RegArgs,
225                          CallingConv::ID CC,
226                          unsigned &NumBytes,
227                          bool isVarArg);
228     unsigned getLibcallReg(const Twine &Name);
229     bool FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
230                     const Instruction *I, CallingConv::ID CC,
231                     unsigned &NumBytes, bool isVarArg);
232     bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);
233 
234     // OptionalDef handling routines.
235 
236     bool isARMNEONPred(const MachineInstr *MI);
237     bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
238     const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
239     void AddLoadStoreOperands(MVT VT, Address &Addr,
240                               const MachineInstrBuilder &MIB,
241                               MachineMemOperand::Flags Flags, bool useAM3);
242 };
243 
244 } // end anonymous namespace
245 
246 // DefinesOptionalPredicate - This is different from DefinesPredicate in that
247 // we don't care about implicit defs here, just places we'll need to add a
248 // default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
249 bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
250   if (!MI->hasOptionalDef())
251     return false;
252 
253   // Look to see if our OptionalDef is defining CPSR or CCR.
254   for (const MachineOperand &MO : MI->operands()) {
255     if (!MO.isReg() || !MO.isDef()) continue;
256     if (MO.getReg() == ARM::CPSR)
257       *CPSR = true;
258   }
259   return true;
260 }
261 
262 bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
263   const MCInstrDesc &MCID = MI->getDesc();
264 
265   // If we're a thumb2 or not NEON function we'll be handled via isPredicable.
266   if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
267        AFI->isThumb2Function())
268     return MI->isPredicable();
269 
270   for (const MCOperandInfo &opInfo : MCID.operands())
271     if (opInfo.isPredicate())
272       return true;
273 
274   return false;
275 }
276 
277 // If the machine is predicable go ahead and add the predicate operands, if
278 // it needs default CC operands add those.
279 // TODO: If we want to support thumb1 then we'll need to deal with optional
280 // CPSR defs that need to be added before the remaining operands. See s_cc_out
281 // for descriptions why.
282 const MachineInstrBuilder &
283 ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
284   MachineInstr *MI = &*MIB;
285 
286   // Do we use a predicate? or...
287   // Are we NEON in ARM mode and have a predicate operand? If so, I know
288   // we're not predicable but add it anyways.
289   if (isARMNEONPred(MI))
290     MIB.add(predOps(ARMCC::AL));
291 
292   // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
293   // defines CPSR. All other OptionalDefines in ARM are the CCR register.
294   bool CPSR = false;
295   if (DefinesOptionalPredicate(MI, &CPSR))
296     MIB.add(CPSR ? t1CondCodeOp() : condCodeOp());
297   return MIB;
298 }
299 
300 unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
301                                      const TargetRegisterClass *RC,
302                                      unsigned Op0, bool Op0IsKill) {
303   Register ResultReg = createResultReg(RC);
304   const MCInstrDesc &II = TII.get(MachineInstOpcode);
305 
306   // Make sure the input operand is sufficiently constrained to be legal
307   // for this instruction.
308   Op0 = constrainOperandRegClass(II, Op0, 1);
309   if (II.getNumDefs() >= 1) {
310     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
311                             ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
312   } else {
313     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
314                    .addReg(Op0, Op0IsKill * RegState::Kill));
315     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
316                    TII.get(TargetOpcode::COPY), ResultReg)
317                    .addReg(II.ImplicitDefs[0]));
318   }
319   return ResultReg;
320 }
321 
322 unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
323                                       const TargetRegisterClass *RC,
324                                       unsigned Op0, bool Op0IsKill,
325                                       unsigned Op1, bool Op1IsKill) {
326   unsigned ResultReg = createResultReg(RC);
327   const MCInstrDesc &II = TII.get(MachineInstOpcode);
328 
329   // Make sure the input operands are sufficiently constrained to be legal
330   // for this instruction.
331   Op0 = constrainOperandRegClass(II, Op0, 1);
332   Op1 = constrainOperandRegClass(II, Op1, 2);
333 
334   if (II.getNumDefs() >= 1) {
335     AddOptionalDefs(
336         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
337             .addReg(Op0, Op0IsKill * RegState::Kill)
338             .addReg(Op1, Op1IsKill * RegState::Kill));
339   } else {
340     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
341                    .addReg(Op0, Op0IsKill * RegState::Kill)
342                    .addReg(Op1, Op1IsKill * RegState::Kill));
343     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
344                            TII.get(TargetOpcode::COPY), ResultReg)
345                    .addReg(II.ImplicitDefs[0]));
346   }
347   return ResultReg;
348 }
349 
350 unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
351                                       const TargetRegisterClass *RC,
352                                       unsigned Op0, bool Op0IsKill,
353                                       uint64_t Imm) {
354   unsigned ResultReg = createResultReg(RC);
355   const MCInstrDesc &II = TII.get(MachineInstOpcode);
356 
357   // Make sure the input operand is sufficiently constrained to be legal
358   // for this instruction.
359   Op0 = constrainOperandRegClass(II, Op0, 1);
360   if (II.getNumDefs() >= 1) {
361     AddOptionalDefs(
362         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
363             .addReg(Op0, Op0IsKill * RegState::Kill)
364             .addImm(Imm));
365   } else {
366     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
367                    .addReg(Op0, Op0IsKill * RegState::Kill)
368                    .addImm(Imm));
369     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
370                            TII.get(TargetOpcode::COPY), ResultReg)
371                    .addReg(II.ImplicitDefs[0]));
372   }
373   return ResultReg;
374 }
375 
376 unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
377                                      const TargetRegisterClass *RC,
378                                      uint64_t Imm) {
379   unsigned ResultReg = createResultReg(RC);
380   const MCInstrDesc &II = TII.get(MachineInstOpcode);
381 
382   if (II.getNumDefs() >= 1) {
383     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
384                             ResultReg).addImm(Imm));
385   } else {
386     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
387                    .addImm(Imm));
388     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
389                            TII.get(TargetOpcode::COPY), ResultReg)
390                    .addReg(II.ImplicitDefs[0]));
391   }
392   return ResultReg;
393 }
394 
395 // TODO: Don't worry about 64-bit now, but when this is fixed remove the
396 // checks from the various callers.
397 unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
398   if (VT == MVT::f64) return 0;
399 
400   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
401   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
402                           TII.get(ARM::VMOVSR), MoveReg)
403                   .addReg(SrcReg));
404   return MoveReg;
405 }
406 
407 unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
408   if (VT == MVT::i64) return 0;
409 
410   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
411   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
412                           TII.get(ARM::VMOVRS), MoveReg)
413                   .addReg(SrcReg));
414   return MoveReg;
415 }
416 
417 // For double width floating point we need to materialize two constants
418 // (the high and the low) into integer registers then use a move to get
419 // the combined constant into an FP reg.
420 unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
421   const APFloat Val = CFP->getValueAPF();
422   bool is64bit = VT == MVT::f64;
423 
424   // This checks to see if we can use VFP3 instructions to materialize
425   // a constant, otherwise we have to go through the constant pool.
426   if (TLI.isFPImmLegal(Val, VT)) {
427     int Imm;
428     unsigned Opc;
429     if (is64bit) {
430       Imm = ARM_AM::getFP64Imm(Val);
431       Opc = ARM::FCONSTD;
432     } else {
433       Imm = ARM_AM::getFP32Imm(Val);
434       Opc = ARM::FCONSTS;
435     }
436     unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
437     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
438                             TII.get(Opc), DestReg).addImm(Imm));
439     return DestReg;
440   }
441 
442   // Require VFP2 for loading fp constants.
443   if (!Subtarget->hasVFP2Base()) return false;
444 
445   // MachineConstantPool wants an explicit alignment.
446   unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
447   if (Align == 0) {
448     // TODO: Figure out if this is correct.
449     Align = DL.getTypeAllocSize(CFP->getType());
450   }
451   unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
452   unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
453   unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
454 
455   // The extra reg is for addrmode5.
456   AddOptionalDefs(
457       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
458           .addConstantPoolIndex(Idx)
459           .addReg(0));
460   return DestReg;
461 }
462 
463 unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
464   if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
465     return 0;
466 
467   // If we can do this in a single instruction without a constant pool entry
468   // do so now.
469   const ConstantInt *CI = cast<ConstantInt>(C);
470   if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
471     unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
472     const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
473       &ARM::GPRRegClass;
474     unsigned ImmReg = createResultReg(RC);
475     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
476                             TII.get(Opc), ImmReg)
477                     .addImm(CI->getZExtValue()));
478     return ImmReg;
479   }
480 
481   // Use MVN to emit negative constants.
482   if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
483     unsigned Imm = (unsigned)~(CI->getSExtValue());
484     bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
485       (ARM_AM::getSOImmVal(Imm) != -1);
486     if (UseImm) {
487       unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
488       const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
489                                                  &ARM::GPRRegClass;
490       unsigned ImmReg = createResultReg(RC);
491       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
492                               TII.get(Opc), ImmReg)
493                       .addImm(Imm));
494       return ImmReg;
495     }
496   }
497 
498   unsigned ResultReg = 0;
499   if (Subtarget->useMovt())
500     ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
501 
502   if (ResultReg)
503     return ResultReg;
504 
505   // Load from constant pool.  For now 32-bit only.
506   if (VT != MVT::i32)
507     return 0;
508 
509   // MachineConstantPool wants an explicit alignment.
510   unsigned Align = DL.getPrefTypeAlignment(C->getType());
511   if (Align == 0) {
512     // TODO: Figure out if this is correct.
513     Align = DL.getTypeAllocSize(C->getType());
514   }
515   unsigned Idx = MCP.getConstantPoolIndex(C, Align);
516   ResultReg = createResultReg(TLI.getRegClassFor(VT));
517   if (isThumb2)
518     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
519                             TII.get(ARM::t2LDRpci), ResultReg)
520                       .addConstantPoolIndex(Idx));
521   else {
522     // The extra immediate is for addrmode2.
523     ResultReg = constrainOperandRegClass(TII.get(ARM::LDRcp), ResultReg, 0);
524     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
525                             TII.get(ARM::LDRcp), ResultReg)
526                       .addConstantPoolIndex(Idx)
527                       .addImm(0));
528   }
529   return ResultReg;
530 }
531 
532 bool ARMFastISel::isPositionIndependent() const {
533   return TLI.isPositionIndependent();
534 }
535 
536 unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
537   // For now 32-bit only.
538   if (VT != MVT::i32 || GV->isThreadLocal()) return 0;
539 
540   // ROPI/RWPI not currently supported.
541   if (Subtarget->isROPI() || Subtarget->isRWPI())
542     return 0;
543 
544   bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
545   const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
546                                            : &ARM::GPRRegClass;
547   unsigned DestReg = createResultReg(RC);
548 
549   // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
550   const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
551   bool IsThreadLocal = GVar && GVar->isThreadLocal();
552   if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;
553 
554   bool IsPositionIndependent = isPositionIndependent();
555   // Use movw+movt when possible, it avoids constant pool entries.
556   // Non-darwin targets only support static movt relocations in FastISel.
557   if (Subtarget->useMovt() &&
558       (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
559     unsigned Opc;
560     unsigned char TF = 0;
561     if (Subtarget->isTargetMachO())
562       TF = ARMII::MO_NONLAZY;
563 
564     if (IsPositionIndependent)
565       Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
566     else
567       Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
568     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
569                             TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
570   } else {
571     // MachineConstantPool wants an explicit alignment.
572     Align Alignment = DL.getPrefTypeAlign(GV->getType());
573 
574     if (Subtarget->isTargetELF() && IsPositionIndependent)
575       return ARMLowerPICELF(GV, VT);
576 
577     // Grab index.
578     unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
579     unsigned Id = AFI->createPICLabelUId();
580     ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
581                                                                 ARMCP::CPValue,
582                                                                 PCAdj);
583     unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment.value());
584 
585     // Load value.
586     MachineInstrBuilder MIB;
587     if (isThumb2) {
588       unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
589       MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
590                     DestReg).addConstantPoolIndex(Idx);
591       if (IsPositionIndependent)
592         MIB.addImm(Id);
593       AddOptionalDefs(MIB);
594     } else {
595       // The extra immediate is for addrmode2.
596       DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
597       MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
598                     TII.get(ARM::LDRcp), DestReg)
599                 .addConstantPoolIndex(Idx)
600                 .addImm(0);
601       AddOptionalDefs(MIB);
602 
603       if (IsPositionIndependent) {
604         unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
605         unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
606 
607         MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
608                                           DbgLoc, TII.get(Opc), NewDestReg)
609                                   .addReg(DestReg)
610                                   .addImm(Id);
611         AddOptionalDefs(MIB);
612         return NewDestReg;
613       }
614     }
615   }
616 
617   if (IsIndirect) {
618     MachineInstrBuilder MIB;
619     unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
620     if (isThumb2)
621       MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
622                     TII.get(ARM::t2LDRi12), NewDestReg)
623             .addReg(DestReg)
624             .addImm(0);
625     else
626       MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
627                     TII.get(ARM::LDRi12), NewDestReg)
628                 .addReg(DestReg)
629                 .addImm(0);
630     DestReg = NewDestReg;
631     AddOptionalDefs(MIB);
632   }
633 
634   return DestReg;
635 }
636 
637 unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
638   EVT CEVT = TLI.getValueType(DL, C->getType(), true);
639 
640   // Only handle simple types.
641   if (!CEVT.isSimple()) return 0;
642   MVT VT = CEVT.getSimpleVT();
643 
644   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
645     return ARMMaterializeFP(CFP, VT);
646   else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
647     return ARMMaterializeGV(GV, VT);
648   else if (isa<ConstantInt>(C))
649     return ARMMaterializeInt(C, VT);
650 
651   return 0;
652 }
653 
654 // TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);
655 
656 unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
657   // Don't handle dynamic allocas.
658   if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
659 
660   MVT VT;
661   if (!isLoadTypeLegal(AI->getType(), VT)) return 0;
662 
663   DenseMap<const AllocaInst*, int>::iterator SI =
664     FuncInfo.StaticAllocaMap.find(AI);
665 
666   // This will get lowered later into the correct offsets and registers
667   // via rewriteXFrameIndex.
668   if (SI != FuncInfo.StaticAllocaMap.end()) {
669     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
670     const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
671     unsigned ResultReg = createResultReg(RC);
672     ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);
673 
674     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
675                             TII.get(Opc), ResultReg)
676                             .addFrameIndex(SI->second)
677                             .addImm(0));
678     return ResultReg;
679   }
680 
681   return 0;
682 }
683 
684 bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
685   EVT evt = TLI.getValueType(DL, Ty, true);
686 
687   // Only handle simple types.
688   if (evt == MVT::Other || !evt.isSimple()) return false;
689   VT = evt.getSimpleVT();
690 
691   // Handle all legal types, i.e. a register that will directly hold this
692   // value.
693   return TLI.isTypeLegal(VT);
694 }
695 
696 bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
697   if (isTypeLegal(Ty, VT)) return true;
698 
699   // If this is a type than can be sign or zero-extended to a basic operation
700   // go ahead and accept it now.
701   if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
702     return true;
703 
704   return false;
705 }
706 
707 // Computes the address to get to an object.
708 bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
709   // Some boilerplate from the X86 FastISel.
710   const User *U = nullptr;
711   unsigned Opcode = Instruction::UserOp1;
712   if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
713     // Don't walk into other basic blocks unless the object is an alloca from
714     // another block, otherwise it may not have a virtual register assigned.
715     if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
716         FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
717       Opcode = I->getOpcode();
718       U = I;
719     }
720   } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
721     Opcode = C->getOpcode();
722     U = C;
723   }
724 
725   if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
726     if (Ty->getAddressSpace() > 255)
727       // Fast instruction selection doesn't support the special
728       // address spaces.
729       return false;
730 
731   switch (Opcode) {
732     default:
733     break;
734     case Instruction::BitCast:
735       // Look through bitcasts.
736       return ARMComputeAddress(U->getOperand(0), Addr);
737     case Instruction::IntToPtr:
738       // Look past no-op inttoptrs.
739       if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
740           TLI.getPointerTy(DL))
741         return ARMComputeAddress(U->getOperand(0), Addr);
742       break;
743     case Instruction::PtrToInt:
744       // Look past no-op ptrtoints.
745       if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
746         return ARMComputeAddress(U->getOperand(0), Addr);
747       break;
748     case Instruction::GetElementPtr: {
749       Address SavedAddr = Addr;
750       int TmpOffset = Addr.Offset;
751 
752       // Iterate through the GEP folding the constants into offsets where
753       // we can.
754       gep_type_iterator GTI = gep_type_begin(U);
755       for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
756            i != e; ++i, ++GTI) {
757         const Value *Op = *i;
758         if (StructType *STy = GTI.getStructTypeOrNull()) {
759           const StructLayout *SL = DL.getStructLayout(STy);
760           unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
761           TmpOffset += SL->getElementOffset(Idx);
762         } else {
763           uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
764           while (true) {
765             if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
766               // Constant-offset addressing.
767               TmpOffset += CI->getSExtValue() * S;
768               break;
769             }
770             if (canFoldAddIntoGEP(U, Op)) {
771               // A compatible add with a constant operand. Fold the constant.
772               ConstantInt *CI =
773               cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
774               TmpOffset += CI->getSExtValue() * S;
775               // Iterate on the other operand.
776               Op = cast<AddOperator>(Op)->getOperand(0);
777               continue;
778             }
779             // Unsupported
780             goto unsupported_gep;
781           }
782         }
783       }
784 
785       // Try to grab the base operand now.
786       Addr.Offset = TmpOffset;
787       if (ARMComputeAddress(U->getOperand(0), Addr)) return true;
788 
789       // We failed, restore everything and try the other options.
790       Addr = SavedAddr;
791 
792       unsupported_gep:
793       break;
794     }
795     case Instruction::Alloca: {
796       const AllocaInst *AI = cast<AllocaInst>(Obj);
797       DenseMap<const AllocaInst*, int>::iterator SI =
798         FuncInfo.StaticAllocaMap.find(AI);
799       if (SI != FuncInfo.StaticAllocaMap.end()) {
800         Addr.BaseType = Address::FrameIndexBase;
801         Addr.Base.FI = SI->second;
802         return true;
803       }
804       break;
805     }
806   }
807 
808   // Try to get this in a register if nothing else has worked.
809   if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
810   return Addr.Base.Reg != 0;
811 }
812 
813 void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
814   bool needsLowering = false;
815   switch (VT.SimpleTy) {
816     default: llvm_unreachable("Unhandled load/store type!");
817     case MVT::i1:
818     case MVT::i8:
819     case MVT::i16:
820     case MVT::i32:
821       if (!useAM3) {
822         // Integer loads/stores handle 12-bit offsets.
823         needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
824         // Handle negative offsets.
825         if (needsLowering && isThumb2)
826           needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
827                             Addr.Offset > -256);
828       } else {
829         // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
830         needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
831       }
832       break;
833     case MVT::f32:
834     case MVT::f64:
835       // Floating point operands handle 8-bit offsets.
836       needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
837       break;
838   }
839 
840   // If this is a stack pointer and the offset needs to be simplified then
841   // put the alloca address into a register, set the base type back to
842   // register and continue. This should almost never happen.
843   if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
844     const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
845                                              : &ARM::GPRRegClass;
846     unsigned ResultReg = createResultReg(RC);
847     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
848     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
849                             TII.get(Opc), ResultReg)
850                             .addFrameIndex(Addr.Base.FI)
851                             .addImm(0));
852     Addr.Base.Reg = ResultReg;
853     Addr.BaseType = Address::RegBase;
854   }
855 
856   // Since the offset is too large for the load/store instruction
857   // get the reg+offset into a register.
858   if (needsLowering) {
859     Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
860                                  /*Op0IsKill*/false, Addr.Offset, MVT::i32);
861     Addr.Offset = 0;
862   }
863 }
864 
865 void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
866                                        const MachineInstrBuilder &MIB,
867                                        MachineMemOperand::Flags Flags,
868                                        bool useAM3) {
869   // addrmode5 output depends on the selection dag addressing dividing the
870   // offset by 4 that it then later multiplies. Do this here as well.
871   if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
872     Addr.Offset /= 4;
873 
874   // Frame base works a bit differently. Handle it separately.
875   if (Addr.BaseType == Address::FrameIndexBase) {
876     int FI = Addr.Base.FI;
877     int Offset = Addr.Offset;
878     MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
879         MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags,
880         MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
881     // Now add the rest of the operands.
882     MIB.addFrameIndex(FI);
883 
884     // ARM halfword load/stores and signed byte loads need an additional
885     // operand.
886     if (useAM3) {
887       int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
888       MIB.addReg(0);
889       MIB.addImm(Imm);
890     } else {
891       MIB.addImm(Addr.Offset);
892     }
893     MIB.addMemOperand(MMO);
894   } else {
895     // Now add the rest of the operands.
896     MIB.addReg(Addr.Base.Reg);
897 
898     // ARM halfword load/stores and signed byte loads need an additional
899     // operand.
900     if (useAM3) {
901       int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
902       MIB.addReg(0);
903       MIB.addImm(Imm);
904     } else {
905       MIB.addImm(Addr.Offset);
906     }
907   }
908   AddOptionalDefs(MIB);
909 }
910 
911 bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
912                               unsigned Alignment, bool isZExt, bool allocReg) {
913   unsigned Opc;
914   bool useAM3 = false;
915   bool needVMOV = false;
916   const TargetRegisterClass *RC;
917   switch (VT.SimpleTy) {
918     // This is mostly going to be Neon/vector support.
919     default: return false;
920     case MVT::i1:
921     case MVT::i8:
922       if (isThumb2) {
923         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
924           Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
925         else
926           Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
927       } else {
928         if (isZExt) {
929           Opc = ARM::LDRBi12;
930         } else {
931           Opc = ARM::LDRSB;
932           useAM3 = true;
933         }
934       }
935       RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
936       break;
937     case MVT::i16:
938       if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
939         return false;
940 
941       if (isThumb2) {
942         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
943           Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
944         else
945           Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
946       } else {
947         Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
948         useAM3 = true;
949       }
950       RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
951       break;
952     case MVT::i32:
953       if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
954         return false;
955 
956       if (isThumb2) {
957         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
958           Opc = ARM::t2LDRi8;
959         else
960           Opc = ARM::t2LDRi12;
961       } else {
962         Opc = ARM::LDRi12;
963       }
964       RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
965       break;
966     case MVT::f32:
967       if (!Subtarget->hasVFP2Base()) return false;
968       // Unaligned loads need special handling. Floats require word-alignment.
969       if (Alignment && Alignment < 4) {
970         needVMOV = true;
971         VT = MVT::i32;
972         Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
973         RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
974       } else {
975         Opc = ARM::VLDRS;
976         RC = TLI.getRegClassFor(VT);
977       }
978       break;
979     case MVT::f64:
980       // Can load and store double precision even without FeatureFP64
981       if (!Subtarget->hasVFP2Base()) return false;
982       // FIXME: Unaligned loads need special handling.  Doublewords require
983       // word-alignment.
984       if (Alignment && Alignment < 4)
985         return false;
986 
987       Opc = ARM::VLDRD;
988       RC = TLI.getRegClassFor(VT);
989       break;
990   }
991   // Simplify this down to something we can handle.
992   ARMSimplifyAddress(Addr, VT, useAM3);
993 
994   // Create the base instruction, then add the operands.
995   if (allocReg)
996     ResultReg = createResultReg(RC);
997   assert(ResultReg > 255 && "Expected an allocated virtual register.");
998   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
999                                     TII.get(Opc), ResultReg);
1000   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
1001 
1002   // If we had an unaligned load of a float we've converted it to an regular
1003   // load.  Now we must move from the GRP to the FP register.
1004   if (needVMOV) {
1005     unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1006     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1007                             TII.get(ARM::VMOVSR), MoveReg)
1008                     .addReg(ResultReg));
1009     ResultReg = MoveReg;
1010   }
1011   return true;
1012 }
1013 
1014 bool ARMFastISel::SelectLoad(const Instruction *I) {
1015   // Atomic loads need special handling.
1016   if (cast<LoadInst>(I)->isAtomic())
1017     return false;
1018 
1019   const Value *SV = I->getOperand(0);
1020   if (TLI.supportSwiftError()) {
1021     // Swifterror values can come from either a function parameter with
1022     // swifterror attribute or an alloca with swifterror attribute.
1023     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
1024       if (Arg->hasSwiftErrorAttr())
1025         return false;
1026     }
1027 
1028     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
1029       if (Alloca->isSwiftError())
1030         return false;
1031     }
1032   }
1033 
1034   // Verify we have a legal type before going any further.
1035   MVT VT;
1036   if (!isLoadTypeLegal(I->getType(), VT))
1037     return false;
1038 
1039   // See if we can handle this address.
1040   Address Addr;
1041   if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
1042 
1043   Register ResultReg;
1044   if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
1045     return false;
1046   updateValueMap(I, ResultReg);
1047   return true;
1048 }
1049 
1050 bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
1051                                unsigned Alignment) {
1052   unsigned StrOpc;
1053   bool useAM3 = false;
1054   switch (VT.SimpleTy) {
1055     // This is mostly going to be Neon/vector support.
1056     default: return false;
1057     case MVT::i1: {
1058       unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
1059                                               : &ARM::GPRRegClass);
1060       unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
1061       SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
1062       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1063                               TII.get(Opc), Res)
1064                       .addReg(SrcReg).addImm(1));
1065       SrcReg = Res;
1066       LLVM_FALLTHROUGH;
1067     }
1068     case MVT::i8:
1069       if (isThumb2) {
1070         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1071           StrOpc = ARM::t2STRBi8;
1072         else
1073           StrOpc = ARM::t2STRBi12;
1074       } else {
1075         StrOpc = ARM::STRBi12;
1076       }
1077       break;
1078     case MVT::i16:
1079       if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
1080         return false;
1081 
1082       if (isThumb2) {
1083         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1084           StrOpc = ARM::t2STRHi8;
1085         else
1086           StrOpc = ARM::t2STRHi12;
1087       } else {
1088         StrOpc = ARM::STRH;
1089         useAM3 = true;
1090       }
1091       break;
1092     case MVT::i32:
1093       if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
1094         return false;
1095 
1096       if (isThumb2) {
1097         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1098           StrOpc = ARM::t2STRi8;
1099         else
1100           StrOpc = ARM::t2STRi12;
1101       } else {
1102         StrOpc = ARM::STRi12;
1103       }
1104       break;
1105     case MVT::f32:
1106       if (!Subtarget->hasVFP2Base()) return false;
1107       // Unaligned stores need special handling. Floats require word-alignment.
1108       if (Alignment && Alignment < 4) {
1109         unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
1110         AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1111                                 TII.get(ARM::VMOVRS), MoveReg)
1112                         .addReg(SrcReg));
1113         SrcReg = MoveReg;
1114         VT = MVT::i32;
1115         StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
1116       } else {
1117         StrOpc = ARM::VSTRS;
1118       }
1119       break;
1120     case MVT::f64:
1121       // Can load and store double precision even without FeatureFP64
1122       if (!Subtarget->hasVFP2Base()) return false;
1123       // FIXME: Unaligned stores need special handling.  Doublewords require
1124       // word-alignment.
1125       if (Alignment && Alignment < 4)
1126           return false;
1127 
1128       StrOpc = ARM::VSTRD;
1129       break;
1130   }
1131   // Simplify this down to something we can handle.
1132   ARMSimplifyAddress(Addr, VT, useAM3);
1133 
1134   // Create the base instruction, then add the operands.
1135   SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
1136   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1137                                     TII.get(StrOpc))
1138                             .addReg(SrcReg);
1139   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
1140   return true;
1141 }
1142 
1143 bool ARMFastISel::SelectStore(const Instruction *I) {
1144   Value *Op0 = I->getOperand(0);
1145   unsigned SrcReg = 0;
1146 
1147   // Atomic stores need special handling.
1148   if (cast<StoreInst>(I)->isAtomic())
1149     return false;
1150 
1151   const Value *PtrV = I->getOperand(1);
1152   if (TLI.supportSwiftError()) {
1153     // Swifterror values can come from either a function parameter with
1154     // swifterror attribute or an alloca with swifterror attribute.
1155     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
1156       if (Arg->hasSwiftErrorAttr())
1157         return false;
1158     }
1159 
1160     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
1161       if (Alloca->isSwiftError())
1162         return false;
1163     }
1164   }
1165 
1166   // Verify we have a legal type before going any further.
1167   MVT VT;
1168   if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
1169     return false;
1170 
1171   // Get the value to be stored into a register.
1172   SrcReg = getRegForValue(Op0);
1173   if (SrcReg == 0) return false;
1174 
1175   // See if we can handle this address.
1176   Address Addr;
1177   if (!ARMComputeAddress(I->getOperand(1), Addr))
1178     return false;
1179 
1180   if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
1181     return false;
1182   return true;
1183 }
1184 
1185 static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
1186   switch (Pred) {
1187     // Needs two compares...
1188     case CmpInst::FCMP_ONE:
1189     case CmpInst::FCMP_UEQ:
1190     default:
1191       // AL is our "false" for now. The other two need more compares.
1192       return ARMCC::AL;
1193     case CmpInst::ICMP_EQ:
1194     case CmpInst::FCMP_OEQ:
1195       return ARMCC::EQ;
1196     case CmpInst::ICMP_SGT:
1197     case CmpInst::FCMP_OGT:
1198       return ARMCC::GT;
1199     case CmpInst::ICMP_SGE:
1200     case CmpInst::FCMP_OGE:
1201       return ARMCC::GE;
1202     case CmpInst::ICMP_UGT:
1203     case CmpInst::FCMP_UGT:
1204       return ARMCC::HI;
1205     case CmpInst::FCMP_OLT:
1206       return ARMCC::MI;
1207     case CmpInst::ICMP_ULE:
1208     case CmpInst::FCMP_OLE:
1209       return ARMCC::LS;
1210     case CmpInst::FCMP_ORD:
1211       return ARMCC::VC;
1212     case CmpInst::FCMP_UNO:
1213       return ARMCC::VS;
1214     case CmpInst::FCMP_UGE:
1215       return ARMCC::PL;
1216     case CmpInst::ICMP_SLT:
1217     case CmpInst::FCMP_ULT:
1218       return ARMCC::LT;
1219     case CmpInst::ICMP_SLE:
1220     case CmpInst::FCMP_ULE:
1221       return ARMCC::LE;
1222     case CmpInst::FCMP_UNE:
1223     case CmpInst::ICMP_NE:
1224       return ARMCC::NE;
1225     case CmpInst::ICMP_UGE:
1226       return ARMCC::HS;
1227     case CmpInst::ICMP_ULT:
1228       return ARMCC::LO;
1229   }
1230 }
1231 
1232 bool ARMFastISel::SelectBranch(const Instruction *I) {
1233   const BranchInst *BI = cast<BranchInst>(I);
1234   MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
1235   MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
1236 
1237   // Simple branch support.
1238 
1239   // If we can, avoid recomputing the compare - redoing it could lead to wonky
1240   // behavior.
1241   if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
1242     if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
1243       // Get the compare predicate.
1244       // Try to take advantage of fallthrough opportunities.
1245       CmpInst::Predicate Predicate = CI->getPredicate();
1246       if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1247         std::swap(TBB, FBB);
1248         Predicate = CmpInst::getInversePredicate(Predicate);
1249       }
1250 
1251       ARMCC::CondCodes ARMPred = getComparePred(Predicate);
1252 
1253       // We may not handle every CC for now.
1254       if (ARMPred == ARMCC::AL) return false;
1255 
1256       // Emit the compare.
1257       if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
1258         return false;
1259 
1260       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1261       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
1262       .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
1263       finishCondBranch(BI->getParent(), TBB, FBB);
1264       return true;
1265     }
1266   } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1267     MVT SourceVT;
1268     if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
1269         (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
1270       unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1271       unsigned OpReg = getRegForValue(TI->getOperand(0));
1272       OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
1273       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1274                               TII.get(TstOpc))
1275                       .addReg(OpReg).addImm(1));
1276 
1277       unsigned CCMode = ARMCC::NE;
1278       if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1279         std::swap(TBB, FBB);
1280         CCMode = ARMCC::EQ;
1281       }
1282 
1283       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1284       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
1285       .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1286 
1287       finishCondBranch(BI->getParent(), TBB, FBB);
1288       return true;
1289     }
1290   } else if (const ConstantInt *CI =
1291              dyn_cast<ConstantInt>(BI->getCondition())) {
1292     uint64_t Imm = CI->getZExtValue();
1293     MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
1294     fastEmitBranch(Target, DbgLoc);
1295     return true;
1296   }
1297 
1298   unsigned CmpReg = getRegForValue(BI->getCondition());
1299   if (CmpReg == 0) return false;
1300 
1301   // We've been divorced from our compare!  Our block was split, and
1302   // now our compare lives in a predecessor block.  We musn't
1303   // re-compare here, as the children of the compare aren't guaranteed
1304   // live across the block boundary (we *could* check for this).
1305   // Regardless, the compare has been done in the predecessor block,
1306   // and it left a value for us in a virtual register.  Ergo, we test
1307   // the one-bit value left in the virtual register.
1308   unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1309   CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
1310   AddOptionalDefs(
1311       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
1312           .addReg(CmpReg)
1313           .addImm(1));
1314 
1315   unsigned CCMode = ARMCC::NE;
1316   if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1317     std::swap(TBB, FBB);
1318     CCMode = ARMCC::EQ;
1319   }
1320 
1321   unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1322   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
1323                   .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1324   finishCondBranch(BI->getParent(), TBB, FBB);
1325   return true;
1326 }
1327 
1328 bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
1329   unsigned AddrReg = getRegForValue(I->getOperand(0));
1330   if (AddrReg == 0) return false;
1331 
1332   unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
1333   assert(isThumb2 || Subtarget->hasV4TOps());
1334 
1335   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1336                           TII.get(Opc)).addReg(AddrReg));
1337 
1338   const IndirectBrInst *IB = cast<IndirectBrInst>(I);
1339   for (const BasicBlock *SuccBB : IB->successors())
1340     FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]);
1341 
1342   return true;
1343 }
1344 
1345 bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
1346                              bool isZExt) {
1347   Type *Ty = Src1Value->getType();
1348   EVT SrcEVT = TLI.getValueType(DL, Ty, true);
1349   if (!SrcEVT.isSimple()) return false;
1350   MVT SrcVT = SrcEVT.getSimpleVT();
1351 
1352   if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())
1353     return false;
1354 
1355   if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
1356     return false;
1357 
1358   // Check to see if the 2nd operand is a constant that we can encode directly
1359   // in the compare.
1360   int Imm = 0;
1361   bool UseImm = false;
1362   bool isNegativeImm = false;
1363   // FIXME: At -O0 we don't have anything that canonicalizes operand order.
1364   // Thus, Src1Value may be a ConstantInt, but we're missing it.
1365   if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
1366     if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
1367         SrcVT == MVT::i1) {
1368       const APInt &CIVal = ConstInt->getValue();
1369       Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
1370       // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
1371       // then a cmn, because there is no way to represent 2147483648 as a
1372       // signed 32-bit int.
1373       if (Imm < 0 && Imm != (int)0x80000000) {
1374         isNegativeImm = true;
1375         Imm = -Imm;
1376       }
1377       UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1378         (ARM_AM::getSOImmVal(Imm) != -1);
1379     }
1380   } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
1381     if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
1382       if (ConstFP->isZero() && !ConstFP->isNegative())
1383         UseImm = true;
1384   }
1385 
1386   unsigned CmpOpc;
1387   bool isICmp = true;
1388   bool needsExt = false;
1389   switch (SrcVT.SimpleTy) {
1390     default: return false;
1391     // TODO: Verify compares.
1392     case MVT::f32:
1393       isICmp = false;
1394       CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
1395       break;
1396     case MVT::f64:
1397       isICmp = false;
1398       CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
1399       break;
1400     case MVT::i1:
1401     case MVT::i8:
1402     case MVT::i16:
1403       needsExt = true;
1404       LLVM_FALLTHROUGH;
1405     case MVT::i32:
1406       if (isThumb2) {
1407         if (!UseImm)
1408           CmpOpc = ARM::t2CMPrr;
1409         else
1410           CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
1411       } else {
1412         if (!UseImm)
1413           CmpOpc = ARM::CMPrr;
1414         else
1415           CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
1416       }
1417       break;
1418   }
1419 
1420   unsigned SrcReg1 = getRegForValue(Src1Value);
1421   if (SrcReg1 == 0) return false;
1422 
1423   unsigned SrcReg2 = 0;
1424   if (!UseImm) {
1425     SrcReg2 = getRegForValue(Src2Value);
1426     if (SrcReg2 == 0) return false;
1427   }
1428 
1429   // We have i1, i8, or i16, we need to either zero extend or sign extend.
1430   if (needsExt) {
1431     SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
1432     if (SrcReg1 == 0) return false;
1433     if (!UseImm) {
1434       SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
1435       if (SrcReg2 == 0) return false;
1436     }
1437   }
1438 
1439   const MCInstrDesc &II = TII.get(CmpOpc);
1440   SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
1441   if (!UseImm) {
1442     SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
1443     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1444                     .addReg(SrcReg1).addReg(SrcReg2));
1445   } else {
1446     MachineInstrBuilder MIB;
1447     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1448       .addReg(SrcReg1);
1449 
1450     // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
1451     if (isICmp)
1452       MIB.addImm(Imm);
1453     AddOptionalDefs(MIB);
1454   }
1455 
1456   // For floating point we need to move the result to a comparison register
1457   // that we can then use for branches.
1458   if (Ty->isFloatTy() || Ty->isDoubleTy())
1459     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1460                             TII.get(ARM::FMSTAT)));
1461   return true;
1462 }
1463 
1464 bool ARMFastISel::SelectCmp(const Instruction *I) {
1465   const CmpInst *CI = cast<CmpInst>(I);
1466 
1467   // Get the compare predicate.
1468   ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
1469 
1470   // We may not handle every CC for now.
1471   if (ARMPred == ARMCC::AL) return false;
1472 
1473   // Emit the compare.
1474   if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
1475     return false;
1476 
1477   // Now set a register based on the comparison. Explicitly set the predicates
1478   // here.
1479   unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1480   const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
1481                                            : &ARM::GPRRegClass;
1482   unsigned DestReg = createResultReg(RC);
1483   Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
1484   unsigned ZeroReg = fastMaterializeConstant(Zero);
1485   // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
1486   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
1487           .addReg(ZeroReg).addImm(1)
1488           .addImm(ARMPred).addReg(ARM::CPSR);
1489 
1490   updateValueMap(I, DestReg);
1491   return true;
1492 }
1493 
1494 bool ARMFastISel::SelectFPExt(const Instruction *I) {
1495   // Make sure we have VFP and that we're extending float to double.
1496   if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;
1497 
1498   Value *V = I->getOperand(0);
1499   if (!I->getType()->isDoubleTy() ||
1500       !V->getType()->isFloatTy()) return false;
1501 
1502   unsigned Op = getRegForValue(V);
1503   if (Op == 0) return false;
1504 
1505   unsigned Result = createResultReg(&ARM::DPRRegClass);
1506   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1507                           TII.get(ARM::VCVTDS), Result)
1508                   .addReg(Op));
1509   updateValueMap(I, Result);
1510   return true;
1511 }
1512 
1513 bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
1514   // Make sure we have VFP and that we're truncating double to float.
1515   if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;
1516 
1517   Value *V = I->getOperand(0);
1518   if (!(I->getType()->isFloatTy() &&
1519         V->getType()->isDoubleTy())) return false;
1520 
1521   unsigned Op = getRegForValue(V);
1522   if (Op == 0) return false;
1523 
1524   unsigned Result = createResultReg(&ARM::SPRRegClass);
1525   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1526                           TII.get(ARM::VCVTSD), Result)
1527                   .addReg(Op));
1528   updateValueMap(I, Result);
1529   return true;
1530 }
1531 
1532 bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
1533   // Make sure we have VFP.
1534   if (!Subtarget->hasVFP2Base()) return false;
1535 
1536   MVT DstVT;
1537   Type *Ty = I->getType();
1538   if (!isTypeLegal(Ty, DstVT))
1539     return false;
1540 
1541   Value *Src = I->getOperand(0);
1542   EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);
1543   if (!SrcEVT.isSimple())
1544     return false;
1545   MVT SrcVT = SrcEVT.getSimpleVT();
1546   if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1547     return false;
1548 
1549   unsigned SrcReg = getRegForValue(Src);
1550   if (SrcReg == 0) return false;
1551 
1552   // Handle sign-extension.
1553   if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
1554     SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
1555                                        /*isZExt*/!isSigned);
1556     if (SrcReg == 0) return false;
1557   }
1558 
1559   // The conversion routine works on fp-reg to fp-reg and the operand above
1560   // was an integer, move it to the fp registers if possible.
1561   unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
1562   if (FP == 0) return false;
1563 
1564   unsigned Opc;
1565   if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
1566   else if (Ty->isDoubleTy() && Subtarget->hasFP64())
1567     Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
1568   else return false;
1569 
1570   unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
1571   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1572                           TII.get(Opc), ResultReg).addReg(FP));
1573   updateValueMap(I, ResultReg);
1574   return true;
1575 }
1576 
1577 bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
1578   // Make sure we have VFP.
1579   if (!Subtarget->hasVFP2Base()) return false;
1580 
1581   MVT DstVT;
1582   Type *RetTy = I->getType();
1583   if (!isTypeLegal(RetTy, DstVT))
1584     return false;
1585 
1586   unsigned Op = getRegForValue(I->getOperand(0));
1587   if (Op == 0) return false;
1588 
1589   unsigned Opc;
1590   Type *OpTy = I->getOperand(0)->getType();
1591   if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
1592   else if (OpTy->isDoubleTy() && Subtarget->hasFP64())
1593     Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
1594   else return false;
1595 
1596   // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
1597   unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1598   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1599                           TII.get(Opc), ResultReg).addReg(Op));
1600 
1601   // This result needs to be in an integer register, but the conversion only
1602   // takes place in fp-regs.
1603   unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
1604   if (IntReg == 0) return false;
1605 
1606   updateValueMap(I, IntReg);
1607   return true;
1608 }
1609 
1610 bool ARMFastISel::SelectSelect(const Instruction *I) {
1611   MVT VT;
1612   if (!isTypeLegal(I->getType(), VT))
1613     return false;
1614 
1615   // Things need to be register sized for register moves.
1616   if (VT != MVT::i32) return false;
1617 
1618   unsigned CondReg = getRegForValue(I->getOperand(0));
1619   if (CondReg == 0) return false;
1620   unsigned Op1Reg = getRegForValue(I->getOperand(1));
1621   if (Op1Reg == 0) return false;
1622 
1623   // Check to see if we can use an immediate in the conditional move.
1624   int Imm = 0;
1625   bool UseImm = false;
1626   bool isNegativeImm = false;
1627   if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
1628     assert(VT == MVT::i32 && "Expecting an i32.");
1629     Imm = (int)ConstInt->getValue().getZExtValue();
1630     if (Imm < 0) {
1631       isNegativeImm = true;
1632       Imm = ~Imm;
1633     }
1634     UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1635       (ARM_AM::getSOImmVal(Imm) != -1);
1636   }
1637 
1638   unsigned Op2Reg = 0;
1639   if (!UseImm) {
1640     Op2Reg = getRegForValue(I->getOperand(2));
1641     if (Op2Reg == 0) return false;
1642   }
1643 
1644   unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1645   CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0);
1646   AddOptionalDefs(
1647       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
1648           .addReg(CondReg)
1649           .addImm(1));
1650 
1651   unsigned MovCCOpc;
1652   const TargetRegisterClass *RC;
1653   if (!UseImm) {
1654     RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
1655     MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1656   } else {
1657     RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
1658     if (!isNegativeImm)
1659       MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1660     else
1661       MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
1662   }
1663   unsigned ResultReg = createResultReg(RC);
1664   if (!UseImm) {
1665     Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
1666     Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
1667     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
1668             ResultReg)
1669         .addReg(Op2Reg)
1670         .addReg(Op1Reg)
1671         .addImm(ARMCC::NE)
1672         .addReg(ARM::CPSR);
1673   } else {
1674     Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
1675     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
1676             ResultReg)
1677         .addReg(Op1Reg)
1678         .addImm(Imm)
1679         .addImm(ARMCC::EQ)
1680         .addReg(ARM::CPSR);
1681   }
1682   updateValueMap(I, ResultReg);
1683   return true;
1684 }
1685 
1686 bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
1687   MVT VT;
1688   Type *Ty = I->getType();
1689   if (!isTypeLegal(Ty, VT))
1690     return false;
1691 
1692   // If we have integer div support we should have selected this automagically.
1693   // In case we have a real miss go ahead and return false and we'll pick
1694   // it up later.
1695   if (Subtarget->hasDivideInThumbMode())
1696     return false;
1697 
1698   // Otherwise emit a libcall.
1699   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1700   if (VT == MVT::i8)
1701     LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
1702   else if (VT == MVT::i16)
1703     LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
1704   else if (VT == MVT::i32)
1705     LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
1706   else if (VT == MVT::i64)
1707     LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1708   else if (VT == MVT::i128)
1709     LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1710   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
1711 
1712   return ARMEmitLibcall(I, LC);
1713 }
1714 
1715 bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
1716   MVT VT;
1717   Type *Ty = I->getType();
1718   if (!isTypeLegal(Ty, VT))
1719     return false;
1720 
1721   // Many ABIs do not provide a libcall for standalone remainder, so we need to
1722   // use divrem (see the RTABI 4.3.1). Since FastISel can't handle non-double
1723   // multi-reg returns, we'll have to bail out.
1724   if (!TLI.hasStandaloneRem(VT)) {
1725     return false;
1726   }
1727 
1728   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1729   if (VT == MVT::i8)
1730     LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1731   else if (VT == MVT::i16)
1732     LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1733   else if (VT == MVT::i32)
1734     LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1735   else if (VT == MVT::i64)
1736     LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1737   else if (VT == MVT::i128)
1738     LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1739   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
1740 
1741   return ARMEmitLibcall(I, LC);
1742 }
1743 
1744 bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
1745   EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1746 
1747   // We can get here in the case when we have a binary operation on a non-legal
1748   // type and the target independent selector doesn't know how to handle it.
1749   if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1750     return false;
1751 
1752   unsigned Opc;
1753   switch (ISDOpcode) {
1754     default: return false;
1755     case ISD::ADD:
1756       Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1757       break;
1758     case ISD::OR:
1759       Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1760       break;
1761     case ISD::SUB:
1762       Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1763       break;
1764   }
1765 
1766   unsigned SrcReg1 = getRegForValue(I->getOperand(0));
1767   if (SrcReg1 == 0) return false;
1768 
1769   // TODO: Often the 2nd operand is an immediate, which can be encoded directly
1770   // in the instruction, rather then materializing the value in a register.
1771   unsigned SrcReg2 = getRegForValue(I->getOperand(1));
1772   if (SrcReg2 == 0) return false;
1773 
1774   unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
1775   SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
1776   SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
1777   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1778                           TII.get(Opc), ResultReg)
1779                   .addReg(SrcReg1).addReg(SrcReg2));
1780   updateValueMap(I, ResultReg);
1781   return true;
1782 }
1783 
1784 bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
1785   EVT FPVT = TLI.getValueType(DL, I->getType(), true);
1786   if (!FPVT.isSimple()) return false;
1787   MVT VT = FPVT.getSimpleVT();
1788 
1789   // FIXME: Support vector types where possible.
1790   if (VT.isVector())
1791     return false;
1792 
1793   // We can get here in the case when we want to use NEON for our fp
1794   // operations, but can't figure out how to. Just use the vfp instructions
1795   // if we have them.
1796   // FIXME: It'd be nice to use NEON instructions.
1797   Type *Ty = I->getType();
1798   if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())
1799     return false;
1800   if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
1801     return false;
1802 
1803   unsigned Opc;
1804   bool is64bit = VT == MVT::f64 || VT == MVT::i64;
1805   switch (ISDOpcode) {
1806     default: return false;
1807     case ISD::FADD:
1808       Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1809       break;
1810     case ISD::FSUB:
1811       Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1812       break;
1813     case ISD::FMUL:
1814       Opc = is64bit ? ARM::VMULD : ARM::VMULS;
1815       break;
1816   }
1817   unsigned Op1 = getRegForValue(I->getOperand(0));
1818   if (Op1 == 0) return false;
1819 
1820   unsigned Op2 = getRegForValue(I->getOperand(1));
1821   if (Op2 == 0) return false;
1822 
1823   unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
1824   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1825                           TII.get(Opc), ResultReg)
1826                   .addReg(Op1).addReg(Op2));
1827   updateValueMap(I, ResultReg);
1828   return true;
1829 }
1830 
1831 // Call Handling Code
1832 
1833 // This is largely taken directly from CCAssignFnForNode
1834 // TODO: We may not support all of this.
1835 CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
1836                                            bool Return,
1837                                            bool isVarArg) {
1838   switch (CC) {
1839   default:
1840     report_fatal_error("Unsupported calling convention");
1841   case CallingConv::Fast:
1842     if (Subtarget->hasVFP2Base() && !isVarArg) {
1843       if (!Subtarget->isAAPCS_ABI())
1844         return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1845       // For AAPCS ABI targets, just use VFP variant of the calling convention.
1846       return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1847     }
1848     LLVM_FALLTHROUGH;
1849   case CallingConv::C:
1850   case CallingConv::CXX_FAST_TLS:
1851     // Use target triple & subtarget features to do actual dispatch.
1852     if (Subtarget->isAAPCS_ABI()) {
1853       if (Subtarget->hasVFP2Base() &&
1854           TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
1855         return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1856       else
1857         return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1858     } else {
1859       return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1860     }
1861   case CallingConv::ARM_AAPCS_VFP:
1862   case CallingConv::Swift:
1863     if (!isVarArg)
1864       return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1865     // Fall through to soft float variant, variadic functions don't
1866     // use hard floating point ABI.
1867     LLVM_FALLTHROUGH;
1868   case CallingConv::ARM_AAPCS:
1869     return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1870   case CallingConv::ARM_APCS:
1871     return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1872   case CallingConv::GHC:
1873     if (Return)
1874       report_fatal_error("Can't return in GHC call convention");
1875     else
1876       return CC_ARM_APCS_GHC;
1877   case CallingConv::CFGuard_Check:
1878     return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check);
1879   }
1880 }
1881 
1882 bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
1883                                   SmallVectorImpl<Register> &ArgRegs,
1884                                   SmallVectorImpl<MVT> &ArgVTs,
1885                                   SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
1886                                   SmallVectorImpl<Register> &RegArgs,
1887                                   CallingConv::ID CC,
1888                                   unsigned &NumBytes,
1889                                   bool isVarArg) {
1890   SmallVector<CCValAssign, 16> ArgLocs;
1891   CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
1892   CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
1893                              CCAssignFnForCall(CC, false, isVarArg));
1894 
1895   // Check that we can handle all of the arguments. If we can't, then bail out
1896   // now before we add code to the MBB.
1897   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1898     CCValAssign &VA = ArgLocs[i];
1899     MVT ArgVT = ArgVTs[VA.getValNo()];
1900 
1901     // We don't handle NEON/vector parameters yet.
1902     if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
1903       return false;
1904 
1905     // Now copy/store arg to correct locations.
1906     if (VA.isRegLoc() && !VA.needsCustom()) {
1907       continue;
1908     } else if (VA.needsCustom()) {
1909       // TODO: We need custom lowering for vector (v2f64) args.
1910       if (VA.getLocVT() != MVT::f64 ||
1911           // TODO: Only handle register args for now.
1912           !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
1913         return false;
1914     } else {
1915       switch (ArgVT.SimpleTy) {
1916       default:
1917         return false;
1918       case MVT::i1:
1919       case MVT::i8:
1920       case MVT::i16:
1921       case MVT::i32:
1922         break;
1923       case MVT::f32:
1924         if (!Subtarget->hasVFP2Base())
1925           return false;
1926         break;
1927       case MVT::f64:
1928         if (!Subtarget->hasVFP2Base())
1929           return false;
1930         break;
1931       }
1932     }
1933   }
1934 
1935   // At the point, we are able to handle the call's arguments in fast isel.
1936 
1937   // Get a count of how many bytes are to be pushed on the stack.
1938   NumBytes = CCInfo.getNextStackOffset();
1939 
1940   // Issue CALLSEQ_START
1941   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
1942   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1943                           TII.get(AdjStackDown))
1944                   .addImm(NumBytes).addImm(0));
1945 
1946   // Process the args.
1947   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1948     CCValAssign &VA = ArgLocs[i];
1949     const Value *ArgVal = Args[VA.getValNo()];
1950     Register Arg = ArgRegs[VA.getValNo()];
1951     MVT ArgVT = ArgVTs[VA.getValNo()];
1952 
1953     assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
1954            "We don't handle NEON/vector parameters yet.");
1955 
1956     // Handle arg promotion, etc.
1957     switch (VA.getLocInfo()) {
1958       case CCValAssign::Full: break;
1959       case CCValAssign::SExt: {
1960         MVT DestVT = VA.getLocVT();
1961         Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
1962         assert(Arg != 0 && "Failed to emit a sext");
1963         ArgVT = DestVT;
1964         break;
1965       }
1966       case CCValAssign::AExt:
1967       // Intentional fall-through.  Handle AExt and ZExt.
1968       case CCValAssign::ZExt: {
1969         MVT DestVT = VA.getLocVT();
1970         Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
1971         assert(Arg != 0 && "Failed to emit a zext");
1972         ArgVT = DestVT;
1973         break;
1974       }
1975       case CCValAssign::BCvt: {
1976         unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
1977                                  /*TODO: Kill=*/false);
1978         assert(BC != 0 && "Failed to emit a bitcast!");
1979         Arg = BC;
1980         ArgVT = VA.getLocVT();
1981         break;
1982       }
1983       default: llvm_unreachable("Unknown arg promotion!");
1984     }
1985 
1986     // Now copy/store arg to correct locations.
1987     if (VA.isRegLoc() && !VA.needsCustom()) {
1988       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1989               TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
1990       RegArgs.push_back(VA.getLocReg());
1991     } else if (VA.needsCustom()) {
1992       // TODO: We need custom lowering for vector (v2f64) args.
1993       assert(VA.getLocVT() == MVT::f64 &&
1994              "Custom lowering for v2f64 args not available");
1995 
1996       // FIXME: ArgLocs[++i] may extend beyond ArgLocs.size()
1997       CCValAssign &NextVA = ArgLocs[++i];
1998 
1999       assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2000              "We only handle register args!");
2001 
2002       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2003                               TII.get(ARM::VMOVRRD), VA.getLocReg())
2004                       .addReg(NextVA.getLocReg(), RegState::Define)
2005                       .addReg(Arg));
2006       RegArgs.push_back(VA.getLocReg());
2007       RegArgs.push_back(NextVA.getLocReg());
2008     } else {
2009       assert(VA.isMemLoc());
2010       // Need to store on the stack.
2011 
2012       // Don't emit stores for undef values.
2013       if (isa<UndefValue>(ArgVal))
2014         continue;
2015 
2016       Address Addr;
2017       Addr.BaseType = Address::RegBase;
2018       Addr.Base.Reg = ARM::SP;
2019       Addr.Offset = VA.getLocMemOffset();
2020 
2021       bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
2022       assert(EmitRet && "Could not emit a store for argument!");
2023     }
2024   }
2025 
2026   return true;
2027 }
2028 
2029 bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
2030                              const Instruction *I, CallingConv::ID CC,
2031                              unsigned &NumBytes, bool isVarArg) {
2032   // Issue CALLSEQ_END
2033   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
2034   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2035                           TII.get(AdjStackUp))
2036                   .addImm(NumBytes).addImm(0));
2037 
2038   // Now the return value.
2039   if (RetVT != MVT::isVoid) {
2040     SmallVector<CCValAssign, 16> RVLocs;
2041     CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
2042     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
2043 
2044     // Copy all of the result registers out of their specified physreg.
2045     if (RVLocs.size() == 2 && RetVT == MVT::f64) {
2046       // For this move we copy into two registers and then move into the
2047       // double fp reg we want.
2048       MVT DestVT = RVLocs[0].getValVT();
2049       const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
2050       Register ResultReg = createResultReg(DstRC);
2051       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2052                               TII.get(ARM::VMOVDRR), ResultReg)
2053                       .addReg(RVLocs[0].getLocReg())
2054                       .addReg(RVLocs[1].getLocReg()));
2055 
2056       UsedRegs.push_back(RVLocs[0].getLocReg());
2057       UsedRegs.push_back(RVLocs[1].getLocReg());
2058 
2059       // Finally update the result.
2060       updateValueMap(I, ResultReg);
2061     } else {
2062       assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
2063       MVT CopyVT = RVLocs[0].getValVT();
2064 
2065       // Special handling for extended integers.
2066       if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
2067         CopyVT = MVT::i32;
2068 
2069       const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
2070 
2071       Register ResultReg = createResultReg(DstRC);
2072       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2073               TII.get(TargetOpcode::COPY),
2074               ResultReg).addReg(RVLocs[0].getLocReg());
2075       UsedRegs.push_back(RVLocs[0].getLocReg());
2076 
2077       // Finally update the result.
2078       updateValueMap(I, ResultReg);
2079     }
2080   }
2081 
2082   return true;
2083 }
2084 
2085 bool ARMFastISel::SelectRet(const Instruction *I) {
2086   const ReturnInst *Ret = cast<ReturnInst>(I);
2087   const Function &F = *I->getParent()->getParent();
2088 
2089   if (!FuncInfo.CanLowerReturn)
2090     return false;
2091 
2092   if (TLI.supportSwiftError() &&
2093       F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
2094     return false;
2095 
2096   if (TLI.supportSplitCSR(FuncInfo.MF))
2097     return false;
2098 
2099   // Build a list of return value registers.
2100   SmallVector<unsigned, 4> RetRegs;
2101 
2102   CallingConv::ID CC = F.getCallingConv();
2103   if (Ret->getNumOperands() > 0) {
2104     SmallVector<ISD::OutputArg, 4> Outs;
2105     GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
2106 
2107     // Analyze operands of the call, assigning locations to each operand.
2108     SmallVector<CCValAssign, 16> ValLocs;
2109     CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
2110     CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
2111                                                  F.isVarArg()));
2112 
2113     const Value *RV = Ret->getOperand(0);
2114     unsigned Reg = getRegForValue(RV);
2115     if (Reg == 0)
2116       return false;
2117 
2118     // Only handle a single return value for now.
2119     if (ValLocs.size() != 1)
2120       return false;
2121 
2122     CCValAssign &VA = ValLocs[0];
2123 
2124     // Don't bother handling odd stuff for now.
2125     if (VA.getLocInfo() != CCValAssign::Full)
2126       return false;
2127     // Only handle register returns for now.
2128     if (!VA.isRegLoc())
2129       return false;
2130 
2131     unsigned SrcReg = Reg + VA.getValNo();
2132     EVT RVEVT = TLI.getValueType(DL, RV->getType());
2133     if (!RVEVT.isSimple()) return false;
2134     MVT RVVT = RVEVT.getSimpleVT();
2135     MVT DestVT = VA.getValVT();
2136     // Special handling for extended integers.
2137     if (RVVT != DestVT) {
2138       if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
2139         return false;
2140 
2141       assert(DestVT == MVT::i32 && "ARM should always ext to i32");
2142 
2143       // Perform extension if flagged as either zext or sext.  Otherwise, do
2144       // nothing.
2145       if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
2146         SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
2147         if (SrcReg == 0) return false;
2148       }
2149     }
2150 
2151     // Make the copy.
2152     Register DstReg = VA.getLocReg();
2153     const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
2154     // Avoid a cross-class copy. This is very unlikely.
2155     if (!SrcRC->contains(DstReg))
2156       return false;
2157     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2158             TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
2159 
2160     // Add register to return instruction.
2161     RetRegs.push_back(VA.getLocReg());
2162   }
2163 
2164   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2165                                     TII.get(Subtarget->getReturnOpcode()));
2166   AddOptionalDefs(MIB);
2167   for (unsigned R : RetRegs)
2168     MIB.addReg(R, RegState::Implicit);
2169   return true;
2170 }
2171 
2172 unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
2173   if (UseReg)
2174     return isThumb2 ? ARM::tBLXr : ARM::BLX;
2175   else
2176     return isThumb2 ? ARM::tBL : ARM::BL;
2177 }
2178 
2179 unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
2180   // Manually compute the global's type to avoid building it when unnecessary.
2181   Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0);
2182   EVT LCREVT = TLI.getValueType(DL, GVTy);
2183   if (!LCREVT.isSimple()) return 0;
2184 
2185   GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false,
2186                                        GlobalValue::ExternalLinkage, nullptr,
2187                                        Name);
2188   assert(GV->getType() == GVTy && "We miscomputed the type for the global!");
2189   return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
2190 }
2191 
2192 // A quick function that will emit a call for a named libcall in F with the
2193 // vector of passed arguments for the Instruction in I. We can assume that we
2194 // can emit a call for any libcall we can produce. This is an abridged version
2195 // of the full call infrastructure since we won't need to worry about things
2196 // like computed function pointers or strange arguments at call sites.
2197 // TODO: Try to unify this and the normal call bits for ARM, then try to unify
2198 // with X86.
2199 bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
2200   CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
2201 
2202   // Handle *simple* calls for now.
2203   Type *RetTy = I->getType();
2204   MVT RetVT;
2205   if (RetTy->isVoidTy())
2206     RetVT = MVT::isVoid;
2207   else if (!isTypeLegal(RetTy, RetVT))
2208     return false;
2209 
2210   // Can't handle non-double multi-reg retvals.
2211   if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
2212     SmallVector<CCValAssign, 16> RVLocs;
2213     CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
2214     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
2215     if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2216       return false;
2217   }
2218 
2219   // Set up the argument vectors.
2220   SmallVector<Value*, 8> Args;
2221   SmallVector<Register, 8> ArgRegs;
2222   SmallVector<MVT, 8> ArgVTs;
2223   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2224   Args.reserve(I->getNumOperands());
2225   ArgRegs.reserve(I->getNumOperands());
2226   ArgVTs.reserve(I->getNumOperands());
2227   ArgFlags.reserve(I->getNumOperands());
2228   for (Value *Op :  I->operands()) {
2229     unsigned Arg = getRegForValue(Op);
2230     if (Arg == 0) return false;
2231 
2232     Type *ArgTy = Op->getType();
2233     MVT ArgVT;
2234     if (!isTypeLegal(ArgTy, ArgVT)) return false;
2235 
2236     ISD::ArgFlagsTy Flags;
2237     Flags.setOrigAlign(Align(DL.getABITypeAlignment(ArgTy)));
2238 
2239     Args.push_back(Op);
2240     ArgRegs.push_back(Arg);
2241     ArgVTs.push_back(ArgVT);
2242     ArgFlags.push_back(Flags);
2243   }
2244 
2245   // Handle the arguments now that we've gotten them.
2246   SmallVector<Register, 4> RegArgs;
2247   unsigned NumBytes;
2248   if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2249                        RegArgs, CC, NumBytes, false))
2250     return false;
2251 
2252   Register CalleeReg;
2253   if (Subtarget->genLongCalls()) {
2254     CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
2255     if (CalleeReg == 0) return false;
2256   }
2257 
2258   // Issue the call.
2259   unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
2260   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2261                                     DbgLoc, TII.get(CallOpc));
2262   // BL / BLX don't take a predicate, but tBL / tBLX do.
2263   if (isThumb2)
2264     MIB.add(predOps(ARMCC::AL));
2265   if (Subtarget->genLongCalls())
2266     MIB.addReg(CalleeReg);
2267   else
2268     MIB.addExternalSymbol(TLI.getLibcallName(Call));
2269 
2270   // Add implicit physical register uses to the call.
2271   for (Register R : RegArgs)
2272     MIB.addReg(R, RegState::Implicit);
2273 
2274   // Add a register mask with the call-preserved registers.
2275   // Proper defs for return values will be added by setPhysRegsDeadExcept().
2276   MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
2277 
2278   // Finish off the call including any return values.
2279   SmallVector<Register, 4> UsedRegs;
2280   if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;
2281 
2282   // Set all unused physreg defs as dead.
2283   static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2284 
2285   return true;
2286 }
2287 
2288 bool ARMFastISel::SelectCall(const Instruction *I,
2289                              const char *IntrMemName = nullptr) {
2290   const CallInst *CI = cast<CallInst>(I);
2291   const Value *Callee = CI->getCalledOperand();
2292 
2293   // Can't handle inline asm.
2294   if (isa<InlineAsm>(Callee)) return false;
2295 
2296   // Allow SelectionDAG isel to handle tail calls.
2297   if (CI->isTailCall()) return false;
2298 
2299   // Check the calling convention.
2300   CallingConv::ID CC = CI->getCallingConv();
2301 
2302   // TODO: Avoid some calling conventions?
2303 
2304   FunctionType *FTy = CI->getFunctionType();
2305   bool isVarArg = FTy->isVarArg();
2306 
2307   // Handle *simple* calls for now.
2308   Type *RetTy = I->getType();
2309   MVT RetVT;
2310   if (RetTy->isVoidTy())
2311     RetVT = MVT::isVoid;
2312   else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
2313            RetVT != MVT::i8  && RetVT != MVT::i1)
2314     return false;
2315 
2316   // Can't handle non-double multi-reg retvals.
2317   if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
2318       RetVT != MVT::i16 && RetVT != MVT::i32) {
2319     SmallVector<CCValAssign, 16> RVLocs;
2320     CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
2321     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
2322     if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2323       return false;
2324   }
2325 
2326   // Set up the argument vectors.
2327   SmallVector<Value*, 8> Args;
2328   SmallVector<Register, 8> ArgRegs;
2329   SmallVector<MVT, 8> ArgVTs;
2330   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2331   unsigned arg_size = CI->arg_size();
2332   Args.reserve(arg_size);
2333   ArgRegs.reserve(arg_size);
2334   ArgVTs.reserve(arg_size);
2335   ArgFlags.reserve(arg_size);
2336   for (auto ArgI = CI->arg_begin(), ArgE = CI->arg_end(); ArgI != ArgE; ++ArgI) {
2337     // If we're lowering a memory intrinsic instead of a regular call, skip the
2338     // last argument, which shouldn't be passed to the underlying function.
2339     if (IntrMemName && ArgE - ArgI <= 1)
2340       break;
2341 
2342     ISD::ArgFlagsTy Flags;
2343     unsigned ArgIdx = ArgI - CI->arg_begin();
2344     if (CI->paramHasAttr(ArgIdx, Attribute::SExt))
2345       Flags.setSExt();
2346     if (CI->paramHasAttr(ArgIdx, Attribute::ZExt))
2347       Flags.setZExt();
2348 
2349     // FIXME: Only handle *easy* calls for now.
2350     if (CI->paramHasAttr(ArgIdx, Attribute::InReg) ||
2351         CI->paramHasAttr(ArgIdx, Attribute::StructRet) ||
2352         CI->paramHasAttr(ArgIdx, Attribute::SwiftSelf) ||
2353         CI->paramHasAttr(ArgIdx, Attribute::SwiftError) ||
2354         CI->paramHasAttr(ArgIdx, Attribute::Nest) ||
2355         CI->paramHasAttr(ArgIdx, Attribute::ByVal))
2356       return false;
2357 
2358     Type *ArgTy = (*ArgI)->getType();
2359     MVT ArgVT;
2360     if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
2361         ArgVT != MVT::i1)
2362       return false;
2363 
2364     Register Arg = getRegForValue(*ArgI);
2365     if (!Arg.isValid())
2366       return false;
2367 
2368     Flags.setOrigAlign(Align(DL.getABITypeAlignment(ArgTy)));
2369 
2370     Args.push_back(*ArgI);
2371     ArgRegs.push_back(Arg);
2372     ArgVTs.push_back(ArgVT);
2373     ArgFlags.push_back(Flags);
2374   }
2375 
2376   // Handle the arguments now that we've gotten them.
2377   SmallVector<Register, 4> RegArgs;
2378   unsigned NumBytes;
2379   if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2380                        RegArgs, CC, NumBytes, isVarArg))
2381     return false;
2382 
2383   bool UseReg = false;
2384   const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
2385   if (!GV || Subtarget->genLongCalls()) UseReg = true;
2386 
2387   Register CalleeReg;
2388   if (UseReg) {
2389     if (IntrMemName)
2390       CalleeReg = getLibcallReg(IntrMemName);
2391     else
2392       CalleeReg = getRegForValue(Callee);
2393 
2394     if (CalleeReg == 0) return false;
2395   }
2396 
2397   // Issue the call.
2398   unsigned CallOpc = ARMSelectCallOp(UseReg);
2399   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2400                                     DbgLoc, TII.get(CallOpc));
2401 
2402   // ARM calls don't take a predicate, but tBL / tBLX do.
2403   if(isThumb2)
2404     MIB.add(predOps(ARMCC::AL));
2405   if (UseReg)
2406     MIB.addReg(CalleeReg);
2407   else if (!IntrMemName)
2408     MIB.addGlobalAddress(GV, 0, 0);
2409   else
2410     MIB.addExternalSymbol(IntrMemName, 0);
2411 
2412   // Add implicit physical register uses to the call.
2413   for (Register R : RegArgs)
2414     MIB.addReg(R, RegState::Implicit);
2415 
2416   // Add a register mask with the call-preserved registers.
2417   // Proper defs for return values will be added by setPhysRegsDeadExcept().
2418   MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
2419 
2420   // Finish off the call including any return values.
2421   SmallVector<Register, 4> UsedRegs;
2422   if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
2423     return false;
2424 
2425   // Set all unused physreg defs as dead.
2426   static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2427 
2428   return true;
2429 }
2430 
2431 bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
2432   return Len <= 16;
2433 }
2434 
2435 bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
2436                                         uint64_t Len, unsigned Alignment) {
2437   // Make sure we don't bloat code by inlining very large memcpy's.
2438   if (!ARMIsMemCpySmall(Len))
2439     return false;
2440 
2441   while (Len) {
2442     MVT VT;
2443     if (!Alignment || Alignment >= 4) {
2444       if (Len >= 4)
2445         VT = MVT::i32;
2446       else if (Len >= 2)
2447         VT = MVT::i16;
2448       else {
2449         assert(Len == 1 && "Expected a length of 1!");
2450         VT = MVT::i8;
2451       }
2452     } else {
2453       // Bound based on alignment.
2454       if (Len >= 2 && Alignment == 2)
2455         VT = MVT::i16;
2456       else {
2457         VT = MVT::i8;
2458       }
2459     }
2460 
2461     bool RV;
2462     Register ResultReg;
2463     RV = ARMEmitLoad(VT, ResultReg, Src);
2464     assert(RV && "Should be able to handle this load.");
2465     RV = ARMEmitStore(VT, ResultReg, Dest);
2466     assert(RV && "Should be able to handle this store.");
2467     (void)RV;
2468 
2469     unsigned Size = VT.getSizeInBits()/8;
2470     Len -= Size;
2471     Dest.Offset += Size;
2472     Src.Offset += Size;
2473   }
2474 
2475   return true;
2476 }
2477 
2478 bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
2479   // FIXME: Handle more intrinsics.
2480   switch (I.getIntrinsicID()) {
2481   default: return false;
2482   case Intrinsic::frameaddress: {
2483     MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
2484     MFI.setFrameAddressIsTaken(true);
2485 
2486     unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
2487     const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
2488                                              : &ARM::GPRRegClass;
2489 
2490     const ARMBaseRegisterInfo *RegInfo =
2491         static_cast<const ARMBaseRegisterInfo *>(Subtarget->getRegisterInfo());
2492     Register FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
2493     unsigned SrcReg = FramePtr;
2494 
2495     // Recursively load frame address
2496     // ldr r0 [fp]
2497     // ldr r0 [r0]
2498     // ldr r0 [r0]
2499     // ...
2500     unsigned DestReg;
2501     unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
2502     while (Depth--) {
2503       DestReg = createResultReg(RC);
2504       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2505                               TII.get(LdrOpc), DestReg)
2506                       .addReg(SrcReg).addImm(0));
2507       SrcReg = DestReg;
2508     }
2509     updateValueMap(&I, SrcReg);
2510     return true;
2511   }
2512   case Intrinsic::memcpy:
2513   case Intrinsic::memmove: {
2514     const MemTransferInst &MTI = cast<MemTransferInst>(I);
2515     // Don't handle volatile.
2516     if (MTI.isVolatile())
2517       return false;
2518 
2519     // Disable inlining for memmove before calls to ComputeAddress.  Otherwise,
2520     // we would emit dead code because we don't currently handle memmoves.
2521     bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
2522     if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
2523       // Small memcpy's are common enough that we want to do them without a call
2524       // if possible.
2525       uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
2526       if (ARMIsMemCpySmall(Len)) {
2527         Address Dest, Src;
2528         if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
2529             !ARMComputeAddress(MTI.getRawSource(), Src))
2530           return false;
2531         unsigned Alignment = MinAlign(MTI.getDestAlignment(),
2532                                       MTI.getSourceAlignment());
2533         if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
2534           return true;
2535       }
2536     }
2537 
2538     if (!MTI.getLength()->getType()->isIntegerTy(32))
2539       return false;
2540 
2541     if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
2542       return false;
2543 
2544     const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
2545     return SelectCall(&I, IntrMemName);
2546   }
2547   case Intrinsic::memset: {
2548     const MemSetInst &MSI = cast<MemSetInst>(I);
2549     // Don't handle volatile.
2550     if (MSI.isVolatile())
2551       return false;
2552 
2553     if (!MSI.getLength()->getType()->isIntegerTy(32))
2554       return false;
2555 
2556     if (MSI.getDestAddressSpace() > 255)
2557       return false;
2558 
2559     return SelectCall(&I, "memset");
2560   }
2561   case Intrinsic::trap: {
2562     unsigned Opcode;
2563     if (Subtarget->isThumb())
2564       Opcode = ARM::tTRAP;
2565     else
2566       Opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
2567     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode));
2568     return true;
2569   }
2570   }
2571 }
2572 
2573 bool ARMFastISel::SelectTrunc(const Instruction *I) {
2574   // The high bits for a type smaller than the register size are assumed to be
2575   // undefined.
2576   Value *Op = I->getOperand(0);
2577 
2578   EVT SrcVT, DestVT;
2579   SrcVT = TLI.getValueType(DL, Op->getType(), true);
2580   DestVT = TLI.getValueType(DL, I->getType(), true);
2581 
2582   if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
2583     return false;
2584   if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
2585     return false;
2586 
2587   unsigned SrcReg = getRegForValue(Op);
2588   if (!SrcReg) return false;
2589 
2590   // Because the high bits are undefined, a truncate doesn't generate
2591   // any code.
2592   updateValueMap(I, SrcReg);
2593   return true;
2594 }
2595 
2596 unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
2597                                     bool isZExt) {
2598   if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
2599     return 0;
2600   if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
2601     return 0;
2602 
2603   // Table of which combinations can be emitted as a single instruction,
2604   // and which will require two.
2605   static const uint8_t isSingleInstrTbl[3][2][2][2] = {
2606     //            ARM                     Thumb
2607     //           !hasV6Ops  hasV6Ops     !hasV6Ops  hasV6Ops
2608     //    ext:     s  z      s  z          s  z      s  z
2609     /*  1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
2610     /*  8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
2611     /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
2612   };
2613 
2614   // Target registers for:
2615   //  - For ARM can never be PC.
2616   //  - For 16-bit Thumb are restricted to lower 8 registers.
2617   //  - For 32-bit Thumb are restricted to non-SP and non-PC.
2618   static const TargetRegisterClass *RCTbl[2][2] = {
2619     // Instructions: Two                     Single
2620     /* ARM      */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
2621     /* Thumb    */ { &ARM::tGPRRegClass,    &ARM::rGPRRegClass    }
2622   };
2623 
2624   // Table governing the instruction(s) to be emitted.
2625   static const struct InstructionTable {
2626     uint32_t Opc   : 16;
2627     uint32_t hasS  :  1; // Some instructions have an S bit, always set it to 0.
2628     uint32_t Shift :  7; // For shift operand addressing mode, used by MOVsi.
2629     uint32_t Imm   :  8; // All instructions have either a shift or a mask.
2630   } IT[2][2][3][2] = {
2631     { // Two instructions (first is left shift, second is in this table).
2632       { // ARM                Opc           S  Shift             Imm
2633         /*  1 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  31 },
2634         /*  1 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  31 } },
2635         /*  8 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  24 },
2636         /*  8 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  24 } },
2637         /* 16 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  16 },
2638         /* 16 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  16 } }
2639       },
2640       { // Thumb              Opc           S  Shift             Imm
2641         /*  1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  31 },
2642         /*  1 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  31 } },
2643         /*  8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  24 },
2644         /*  8 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  24 } },
2645         /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  16 },
2646         /* 16 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  16 } }
2647       }
2648     },
2649     { // Single instruction.
2650       { // ARM                Opc           S  Shift             Imm
2651         /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,   0 },
2652         /*  1 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift,   1 } },
2653         /*  8 bit sext */ { { ARM::SXTB   , 0, ARM_AM::no_shift,   0 },
2654         /*  8 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift, 255 } },
2655         /* 16 bit sext */ { { ARM::SXTH   , 0, ARM_AM::no_shift,   0 },
2656         /* 16 bit zext */   { ARM::UXTH   , 0, ARM_AM::no_shift,   0 } }
2657       },
2658       { // Thumb              Opc           S  Shift             Imm
2659         /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,   0 },
2660         /*  1 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift,   1 } },
2661         /*  8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift,   0 },
2662         /*  8 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } },
2663         /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift,   0 },
2664         /* 16 bit zext */   { ARM::t2UXTH , 0, ARM_AM::no_shift,   0 } }
2665       }
2666     }
2667   };
2668 
2669   unsigned SrcBits = SrcVT.getSizeInBits();
2670   unsigned DestBits = DestVT.getSizeInBits();
2671   (void) DestBits;
2672   assert((SrcBits < DestBits) && "can only extend to larger types");
2673   assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
2674          "other sizes unimplemented");
2675   assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
2676          "other sizes unimplemented");
2677 
2678   bool hasV6Ops = Subtarget->hasV6Ops();
2679   unsigned Bitness = SrcBits / 8;  // {1,8,16}=>{0,1,2}
2680   assert((Bitness < 3) && "sanity-check table bounds");
2681 
2682   bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
2683   const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
2684   const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
2685   unsigned Opc = ITP->Opc;
2686   assert(ARM::KILL != Opc && "Invalid table entry");
2687   unsigned hasS = ITP->hasS;
2688   ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift;
2689   assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
2690          "only MOVsi has shift operand addressing mode");
2691   unsigned Imm = ITP->Imm;
2692 
2693   // 16-bit Thumb instructions always set CPSR (unless they're in an IT block).
2694   bool setsCPSR = &ARM::tGPRRegClass == RC;
2695   unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
2696   unsigned ResultReg;
2697   // MOVsi encodes shift and immediate in shift operand addressing mode.
2698   // The following condition has the same value when emitting two
2699   // instruction sequences: both are shifts.
2700   bool ImmIsSO = (Shift != ARM_AM::no_shift);
2701 
2702   // Either one or two instructions are emitted.
2703   // They're always of the form:
2704   //   dst = in OP imm
2705   // CPSR is set only by 16-bit Thumb instructions.
2706   // Predicate, if any, is AL.
2707   // S bit, if available, is always 0.
2708   // When two are emitted the first's result will feed as the second's input,
2709   // that value is then dead.
2710   unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
2711   for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
2712     ResultReg = createResultReg(RC);
2713     bool isLsl = (0 == Instr) && !isSingleInstr;
2714     unsigned Opcode = isLsl ? LSLOpc : Opc;
2715     ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift;
2716     unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
2717     bool isKill = 1 == Instr;
2718     MachineInstrBuilder MIB = BuildMI(
2719         *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg);
2720     if (setsCPSR)
2721       MIB.addReg(ARM::CPSR, RegState::Define);
2722     SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
2723     MIB.addReg(SrcReg, isKill * RegState::Kill)
2724         .addImm(ImmEnc)
2725         .add(predOps(ARMCC::AL));
2726     if (hasS)
2727       MIB.add(condCodeOp());
2728     // Second instruction consumes the first's result.
2729     SrcReg = ResultReg;
2730   }
2731 
2732   return ResultReg;
2733 }
2734 
2735 bool ARMFastISel::SelectIntExt(const Instruction *I) {
2736   // On ARM, in general, integer casts don't involve legal types; this code
2737   // handles promotable integers.
2738   Type *DestTy = I->getType();
2739   Value *Src = I->getOperand(0);
2740   Type *SrcTy = Src->getType();
2741 
2742   bool isZExt = isa<ZExtInst>(I);
2743   unsigned SrcReg = getRegForValue(Src);
2744   if (!SrcReg) return false;
2745 
2746   EVT SrcEVT, DestEVT;
2747   SrcEVT = TLI.getValueType(DL, SrcTy, true);
2748   DestEVT = TLI.getValueType(DL, DestTy, true);
2749   if (!SrcEVT.isSimple()) return false;
2750   if (!DestEVT.isSimple()) return false;
2751 
2752   MVT SrcVT = SrcEVT.getSimpleVT();
2753   MVT DestVT = DestEVT.getSimpleVT();
2754   unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2755   if (ResultReg == 0) return false;
2756   updateValueMap(I, ResultReg);
2757   return true;
2758 }
2759 
2760 bool ARMFastISel::SelectShift(const Instruction *I,
2761                               ARM_AM::ShiftOpc ShiftTy) {
2762   // We handle thumb2 mode by target independent selector
2763   // or SelectionDAG ISel.
2764   if (isThumb2)
2765     return false;
2766 
2767   // Only handle i32 now.
2768   EVT DestVT = TLI.getValueType(DL, I->getType(), true);
2769   if (DestVT != MVT::i32)
2770     return false;
2771 
2772   unsigned Opc = ARM::MOVsr;
2773   unsigned ShiftImm;
2774   Value *Src2Value = I->getOperand(1);
2775   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
2776     ShiftImm = CI->getZExtValue();
2777 
2778     // Fall back to selection DAG isel if the shift amount
2779     // is zero or greater than the width of the value type.
2780     if (ShiftImm == 0 || ShiftImm >=32)
2781       return false;
2782 
2783     Opc = ARM::MOVsi;
2784   }
2785 
2786   Value *Src1Value = I->getOperand(0);
2787   unsigned Reg1 = getRegForValue(Src1Value);
2788   if (Reg1 == 0) return false;
2789 
2790   unsigned Reg2 = 0;
2791   if (Opc == ARM::MOVsr) {
2792     Reg2 = getRegForValue(Src2Value);
2793     if (Reg2 == 0) return false;
2794   }
2795 
2796   unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
2797   if(ResultReg == 0) return false;
2798 
2799   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2800                                     TII.get(Opc), ResultReg)
2801                             .addReg(Reg1);
2802 
2803   if (Opc == ARM::MOVsi)
2804     MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
2805   else if (Opc == ARM::MOVsr) {
2806     MIB.addReg(Reg2);
2807     MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
2808   }
2809 
2810   AddOptionalDefs(MIB);
2811   updateValueMap(I, ResultReg);
2812   return true;
2813 }
2814 
2815 // TODO: SoftFP support.
2816 bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
2817   switch (I->getOpcode()) {
2818     case Instruction::Load:
2819       return SelectLoad(I);
2820     case Instruction::Store:
2821       return SelectStore(I);
2822     case Instruction::Br:
2823       return SelectBranch(I);
2824     case Instruction::IndirectBr:
2825       return SelectIndirectBr(I);
2826     case Instruction::ICmp:
2827     case Instruction::FCmp:
2828       return SelectCmp(I);
2829     case Instruction::FPExt:
2830       return SelectFPExt(I);
2831     case Instruction::FPTrunc:
2832       return SelectFPTrunc(I);
2833     case Instruction::SIToFP:
2834       return SelectIToFP(I, /*isSigned*/ true);
2835     case Instruction::UIToFP:
2836       return SelectIToFP(I, /*isSigned*/ false);
2837     case Instruction::FPToSI:
2838       return SelectFPToI(I, /*isSigned*/ true);
2839     case Instruction::FPToUI:
2840       return SelectFPToI(I, /*isSigned*/ false);
2841     case Instruction::Add:
2842       return SelectBinaryIntOp(I, ISD::ADD);
2843     case Instruction::Or:
2844       return SelectBinaryIntOp(I, ISD::OR);
2845     case Instruction::Sub:
2846       return SelectBinaryIntOp(I, ISD::SUB);
2847     case Instruction::FAdd:
2848       return SelectBinaryFPOp(I, ISD::FADD);
2849     case Instruction::FSub:
2850       return SelectBinaryFPOp(I, ISD::FSUB);
2851     case Instruction::FMul:
2852       return SelectBinaryFPOp(I, ISD::FMUL);
2853     case Instruction::SDiv:
2854       return SelectDiv(I, /*isSigned*/ true);
2855     case Instruction::UDiv:
2856       return SelectDiv(I, /*isSigned*/ false);
2857     case Instruction::SRem:
2858       return SelectRem(I, /*isSigned*/ true);
2859     case Instruction::URem:
2860       return SelectRem(I, /*isSigned*/ false);
2861     case Instruction::Call:
2862       if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2863         return SelectIntrinsicCall(*II);
2864       return SelectCall(I);
2865     case Instruction::Select:
2866       return SelectSelect(I);
2867     case Instruction::Ret:
2868       return SelectRet(I);
2869     case Instruction::Trunc:
2870       return SelectTrunc(I);
2871     case Instruction::ZExt:
2872     case Instruction::SExt:
2873       return SelectIntExt(I);
2874     case Instruction::Shl:
2875       return SelectShift(I, ARM_AM::lsl);
2876     case Instruction::LShr:
2877       return SelectShift(I, ARM_AM::lsr);
2878     case Instruction::AShr:
2879       return SelectShift(I, ARM_AM::asr);
2880     default: break;
2881   }
2882   return false;
2883 }
2884 
2885 // This table describes sign- and zero-extend instructions which can be
2886 // folded into a preceding load. All of these extends have an immediate
2887 // (sometimes a mask and sometimes a shift) that's applied after
2888 // extension.
2889 static const struct FoldableLoadExtendsStruct {
2890   uint16_t Opc[2];  // ARM, Thumb.
2891   uint8_t ExpectedImm;
2892   uint8_t isZExt     : 1;
2893   uint8_t ExpectedVT : 7;
2894 } FoldableLoadExtends[] = {
2895   { { ARM::SXTH,  ARM::t2SXTH  },   0, 0, MVT::i16 },
2896   { { ARM::UXTH,  ARM::t2UXTH  },   0, 1, MVT::i16 },
2897   { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8  },
2898   { { ARM::SXTB,  ARM::t2SXTB  },   0, 0, MVT::i8  },
2899   { { ARM::UXTB,  ARM::t2UXTB  },   0, 1, MVT::i8  }
2900 };
2901 
2902 /// The specified machine instr operand is a vreg, and that
2903 /// vreg is being provided by the specified load instruction.  If possible,
2904 /// try to fold the load as an operand to the instruction, returning true if
2905 /// successful.
2906 bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
2907                                       const LoadInst *LI) {
2908   // Verify we have a legal type before going any further.
2909   MVT VT;
2910   if (!isLoadTypeLegal(LI->getType(), VT))
2911     return false;
2912 
2913   // Combine load followed by zero- or sign-extend.
2914   // ldrb r1, [r0]       ldrb r1, [r0]
2915   // uxtb r2, r1     =>
2916   // mov  r3, r2         mov  r3, r1
2917   if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
2918     return false;
2919   const uint64_t Imm = MI->getOperand(2).getImm();
2920 
2921   bool Found = false;
2922   bool isZExt;
2923   for (const FoldableLoadExtendsStruct &FLE : FoldableLoadExtends) {
2924     if (FLE.Opc[isThumb2] == MI->getOpcode() &&
2925         (uint64_t)FLE.ExpectedImm == Imm &&
2926         MVT((MVT::SimpleValueType)FLE.ExpectedVT) == VT) {
2927       Found = true;
2928       isZExt = FLE.isZExt;
2929     }
2930   }
2931   if (!Found) return false;
2932 
2933   // See if we can handle this address.
2934   Address Addr;
2935   if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
2936 
2937   Register ResultReg = MI->getOperand(0).getReg();
2938   if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
2939     return false;
2940   MachineBasicBlock::iterator I(MI);
2941   removeDeadCode(I, std::next(I));
2942   return true;
2943 }
2944 
2945 unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {
2946   bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
2947 
2948   LLVMContext *Context = &MF->getFunction().getContext();
2949   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2950   unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2951   ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
2952       GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
2953       UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
2954       /*AddCurrentAddress=*/UseGOT_PREL);
2955 
2956   unsigned ConstAlign =
2957       MF->getDataLayout().getPrefTypeAlignment(Type::getInt32PtrTy(*Context));
2958   unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
2959   MachineMemOperand *CPMMO =
2960       MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
2961                                MachineMemOperand::MOLoad, 4, Align(4));
2962 
2963   Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
2964   unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
2965   MachineInstrBuilder MIB =
2966       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg)
2967           .addConstantPoolIndex(Idx)
2968           .addMemOperand(CPMMO);
2969   if (Opc == ARM::LDRcp)
2970     MIB.addImm(0);
2971   MIB.add(predOps(ARMCC::AL));
2972 
2973   // Fix the address by adding pc.
2974   unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
2975   Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
2976                                                           : ARM::PICADD;
2977   DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0);
2978   MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
2979             .addReg(TempReg)
2980             .addImm(ARMPCLabelIndex);
2981 
2982   if (!Subtarget->isThumb())
2983     MIB.add(predOps(ARMCC::AL));
2984 
2985   if (UseGOT_PREL && Subtarget->isThumb()) {
2986     unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
2987     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2988                   TII.get(ARM::t2LDRi12), NewDestReg)
2989               .addReg(DestReg)
2990               .addImm(0);
2991     DestReg = NewDestReg;
2992     AddOptionalDefs(MIB);
2993   }
2994   return DestReg;
2995 }
2996 
2997 bool ARMFastISel::fastLowerArguments() {
2998   if (!FuncInfo.CanLowerReturn)
2999     return false;
3000 
3001   const Function *F = FuncInfo.Fn;
3002   if (F->isVarArg())
3003     return false;
3004 
3005   CallingConv::ID CC = F->getCallingConv();
3006   switch (CC) {
3007   default:
3008     return false;
3009   case CallingConv::Fast:
3010   case CallingConv::C:
3011   case CallingConv::ARM_AAPCS_VFP:
3012   case CallingConv::ARM_AAPCS:
3013   case CallingConv::ARM_APCS:
3014   case CallingConv::Swift:
3015     break;
3016   }
3017 
3018   // Only handle simple cases. i.e. Up to 4 i8/i16/i32 scalar arguments
3019   // which are passed in r0 - r3.
3020   for (const Argument &Arg : F->args()) {
3021     if (Arg.getArgNo() >= 4)
3022       return false;
3023 
3024     if (Arg.hasAttribute(Attribute::InReg) ||
3025         Arg.hasAttribute(Attribute::StructRet) ||
3026         Arg.hasAttribute(Attribute::SwiftSelf) ||
3027         Arg.hasAttribute(Attribute::SwiftError) ||
3028         Arg.hasAttribute(Attribute::ByVal))
3029       return false;
3030 
3031     Type *ArgTy = Arg.getType();
3032     if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
3033       return false;
3034 
3035     EVT ArgVT = TLI.getValueType(DL, ArgTy);
3036     if (!ArgVT.isSimple()) return false;
3037     switch (ArgVT.getSimpleVT().SimpleTy) {
3038     case MVT::i8:
3039     case MVT::i16:
3040     case MVT::i32:
3041       break;
3042     default:
3043       return false;
3044     }
3045   }
3046 
3047   static const MCPhysReg GPRArgRegs[] = {
3048     ARM::R0, ARM::R1, ARM::R2, ARM::R3
3049   };
3050 
3051   const TargetRegisterClass *RC = &ARM::rGPRRegClass;
3052   for (const Argument &Arg : F->args()) {
3053     unsigned ArgNo = Arg.getArgNo();
3054     unsigned SrcReg = GPRArgRegs[ArgNo];
3055     unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3056     // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
3057     // Without this, EmitLiveInCopies may eliminate the livein if its only
3058     // use is a bitcast (which isn't turned into an instruction).
3059     unsigned ResultReg = createResultReg(RC);
3060     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3061             TII.get(TargetOpcode::COPY),
3062             ResultReg).addReg(DstReg, getKillRegState(true));
3063     updateValueMap(&Arg, ResultReg);
3064   }
3065 
3066   return true;
3067 }
3068 
3069 namespace llvm {
3070 
3071   FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
3072                                 const TargetLibraryInfo *libInfo) {
3073     if (funcInfo.MF->getSubtarget<ARMSubtarget>().useFastISel())
3074       return new ARMFastISel(funcInfo, libInfo);
3075 
3076     return nullptr;
3077   }
3078 
3079 } // end namespace llvm
3080