//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                    cl::desc("Turn off experimental ARM fast-isel support"),
                    cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
       Base.Reg = 0;
     }
  };
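
  // Illustrative note: an Address of { RegBase, Base.Reg = <vreg>, Offset = 8 }
  // ultimately selects an operand like [r1, #8], while
  // { FrameIndexBase, Base.FI = 0, Offset = -4 } is resolved against the stack
  // frame during frame index elimination.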

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);

    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(const GlobalValue *GV);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets *CPSR to true if the instruction defines CPSR
// rather than CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If this is a Thumb2 function, or the instruction is not in the NEON
  // domain, it was already handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? Or are we a NEON instruction in ARM mode with a
  // predicate operand? In the latter case the instruction isn't formally
  // predicable, but it still wants the default predicate operands.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate? The OptionalDef is CPSR for
  // flag-setting instructions; all other OptionalDefs in ARM are the CCR
  // register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}
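
// Example (sketch): given an "ADDri <vreg>, 1", AddDefaultPred appends the
// always-execute predicate (ARMCC::AL plus a <noreg> predicate register) and
// AddDefaultCC appends a <noreg> operand for the optional CCR (s-bit) def, so
// the add is emitted as a non-flag-setting, unconditional instruction.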

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                   TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
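
// Note on the pattern above (shared by the FastEmitInst_* overloads below):
// if the opcode has no explicit defs, the result lives in the instruction's
// first implicit def (a physical register), so we emit the instruction and
// then COPY that physreg into the freshly created virtual register.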

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addReg(Op2, Op2IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                   .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                         DL, TII.get(TargetOpcode::COPY), ResultReg)
                 .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}
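
// These two helpers correspond to the VFP register-transfer instructions:
// VMOVRS is "vmov rN, sM" (S-register to core register) and VMOVSR is
// "vmov sM, rN" (core register to S-register).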

// Materialize an FP constant: use a VFP3 immediate move when the value is
// encodable, otherwise load it from the constant pool.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}
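
// Example (sketch): 1.0f is a legal VFP3 immediate, so it materializes as
// "vmov.f32 sN, #1.0" via FCONSTS; 0.1f is not encodable, so it is loaded
// from a constant pool entry with "vldr sN, LCPIx_y" instead.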

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}
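
// Example (sketch): on v6t2+, 0x1234 becomes "movw r0, #0x1234"; -7 becomes
// "mvn r0, #6" (since ~(-7) == 6); a constant like 0x12345678 fits neither
// pattern and falls through to the constant-pool load.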

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // Use movw+movt when possible, it avoids constant pool entries.
  // Darwin targets don't support movt with Reloc::Static, see
  // ARMTargetLowering::LowerGlobalAddressDarwin.  Other targets only support
  // static movt relocations.
  if (Subtarget->useMovt() &&
      Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
    unsigned Opc;
    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    case Reloc::DynamicNoPIC:
      Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
        .addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                    DestReg)
        .addConstantPoolIndex(Idx)
        .addImm(0);
    }
    AddOptionalDefs(MIB);
  }

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}
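
// Example (sketch): with movw/movt available and a static relocation model on
// a non-Darwin target, a global @g materializes via the MOVi32imm pseudo,
// which later expands to "movw r0, :lower16:g; movt r0, :upper16:g". If @g is
// accessed through an indirect symbol (e.g. a Darwin non-lazy pointer), the
// trailing "ldr r0, [r0]" dereferences the stub.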

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                            .addFrameIndex(SI->second)
                            .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                 == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported.
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}
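
// Example (sketch): for "getelementptr i32* %p, i32 3" the loop above folds
// the constant index into Addr.Offset (3 * 4 == 12 bytes), leaving %p's
// register as the base, so a following load can be selected as
// "ldr rD, [rP, #12]".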

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a frame index and the offset needs to be simplified, then
  // put the alloca address into a register, set the base type back to
  // register, and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass :
                              ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                            .addFrameIndex(Addr.Base.FI)
                            .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset won't fit the load/store instruction's immediate field,
  // fold reg+offset into a new base register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}
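
// Example (sketch): a byte offset of 4096 does not fit the 12-bit immediate
// of LDR/STR, so the base+offset is folded into a new register with an ADD
// and the access is re-expressed as [rNew, #0].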

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 encodes its offset in words: the selection DAG divides the byte
  // offset by 4 here, and the encoder multiplies it back later. Do the same.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
          FuncInfo.MF->getMachineMemOperand(
                                  MachinePointerInfo::getFixedStack(FI, Offset),
                                  Flags,
                                  MFI.getObjectSize(FI),
                                  MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}
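
// Note on the addrmode3 immediate above: the value packs the offset magnitude
// into the low 8 bits and sets bit 8 (0x100) as a subtract flag for negative
// offsets, matching the ARM_AM addrmode3 packing.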

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = ARM::GPRRegisterClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling.  Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load.  Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}
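
// Example (sketch): a zero-extending i16 load in ARM mode selects LDRH with
// an addrmode3 offset ("ldrh rD, [rBase, #imm]"); an unaligned f32 load is
// instead emitted as an integer LDR followed by "vmov sN, rD".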

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass :
                                               ARM::GPRRegisterClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling.  Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}
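
// Example (sketch): storing an i1 first masks the source down to a single
// bit, so the emitted sequence is roughly "and r1, r1, #1" followed by
// "strb r1, [r0]".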

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}
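
// Note: the FP predicates map onto the NZCV encodings produced by VCMPE +
// FMSTAT, where the unordered case sets C and V -- e.g. FCMP_OLT uses MI
// (N is only set for an ordered less-than) and FCMP_UGE uses PL (N clear
// covers greater, equal, and unordered).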

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      if (OpReg == 0) return false;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}
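
// Example (sketch): "br i1 %c, label %t, label %f" where %c comes from an
// icmp in the same block becomes "cmp r0, r1; b<cond> LBB_t; b LBB_f", and
// the unconditional branch is dropped by FastEmitBranch when %f is the
// layout successor.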

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      if (Imm < 0) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16; we need to either zero-extend or sign-extend.
  if (needsExt) {
    unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (ResultReg == 0) return false;
    SrcReg1 = ResultReg;
    if (!UseImm) {
      ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (ResultReg == 0) return false;
      SrcReg2 = ResultReg;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}
1477 
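// For illustration only: SelectCmp below materializes the predicate as a
// 0/1 value, roughly
//   mov   rD, #0        ; materialized zero
//   movne rD, #1        ; predicated on the IR compare's condition
// where the condition-register operand is CPSR for integer compares and
// FPSCR for floating-point ones. Placeholder register names.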
bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  Type *Ty = CI->getOperand(0)->getType();

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We don't handle every CC yet.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicate
  // operands here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
                                     : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
          .addReg(ZeroReg).addImm(1)
          .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
                                       /*isZExt*/!isSigned);
    if (ResultReg == 0) return false;
    SrcReg = ResultReg;
  }

  // The conversion routine works on fp-reg to fp-reg; since the operand
  // above is an integer, move it into an fp register first if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

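// For illustration only: an fptosi of f64 through SelectFPToI below is
// lowered roughly as
//   vcvt.s32.f64 s0, d0   ; VTOSIZD, result held in an f32 register
//   vmov r0, s0           ; ARMMoveToIntReg
// with placeholder register names.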
bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

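// For illustration only: a register-register select through SelectSelect
// below becomes roughly
//   cmp   rC, #0         ; test the i1 condition
//   mov   rD, rOp2       ; default to the false value
//   movne rD, rOp1       ; overwrite with the true value if rC != 0
// with placeholder register names.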
bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

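// For illustration only: with no hardware divide, SelectDiv below turns an
// i32 sdiv into a call to the routine named by
// TLI.getLibcallName(RTLIB::SDIV_I32) - typically __divsi3, or an
// __aeabi_* equivalent on AEABI targets.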
bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss, go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

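// For illustration only: the sub-word binary ops below just use the 32-bit
// ALU instructions, so an i8 add is emitted roughly as
//   add r0, r0, r1
// leaving the upper 24 bits undefined, on the assumption (see SelectTrunc)
// that consumers of narrow values re-extend as needed.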
bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

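// For illustration only: for a simple call such as f(i32 a, i32 b),
// ProcessCallArgs below produces roughly
//   CALLSEQ_START #NumBytes
//   COPY r0 <- vreg(a)
//   COPY r1 <- vreg(b)
// with any arguments assigned to memory instead stored to [sp, #offset]
// via ARMEmitStore. Placeholder names throughout.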
bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                           /*isZExt*/false);
        assert(ResultReg != 0 && "Failed to emit a sext");
        Arg = ResultReg;
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through.  Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                           /*isZExt*/true);
        assert(ResultReg != 0 && "Failed to emit a zext");
        Arg = ResultReg;
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}

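// For illustration only: in FinishCall below, an f64 returned in r0/r1
// under a soft-float calling convention is reassembled roughly as
//   vmov d0, r0, r1      ; VMOVDRR
// and sub-word integer returns are copied out of r0 as a full i32, since
// the high bits are treated as undefined.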
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      bool isZExt = Outs[0].Flags.isZExt();
      unsigned ResultReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, isZExt);
      if (ResultReg == 0) return false;
      SrcReg = ResultReg;
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
  // iOS needs the r9 versions of the opcodes.
  bool isiOS = Subtarget->isTargetIOS();
  if (isThumb2) {
    return isiOS ? ARM::tBLr9 : ARM::tBL;
  } else {
    return isiOS ? ARM::BLr9 : ARM::BL;
  }
}

// A quick function that will emit a call to the libcall named by Call for
// the instruction I, using I's operands as the arguments. We can assume
// that we can emit a call for any libcall we can produce. This is an
// abridged version of the full call infrastructure since we won't need to
// worry about things like computed function pointers or strange arguments
// at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to
// unify with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for iOS, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL);
  if (isThumb2)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
            .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
            .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

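// For illustration only: a simple direct call through SelectCall below
// ends up roughly as
//   CALLSEQ_START
//   <argument copies/stores>
//   bl _foo              ; BL/BLr9 or tBL/tBLr9
//   CALLSEQ_END
//   COPY vreg <- r0      ; if there is a return value
// where _foo stands in for the actual callee.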
bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e - i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for iOS, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
  if (isThumb2) {
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)));
    if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);
  } else {
    if (!IntrMemName)
      // Explicitly adding the predicate here.
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
            .addGlobalAddress(GV, 0, 0));
    else
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
            .addExternalSymbol(IntrMemName, 0));
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

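// For illustration only: a 7-byte inline memcpy below unrolls into an i32,
// an i16, and an i8 load/store pair, roughly
//   ldr  r3, [src]      / str  r3, [dst]
//   ldrh r3, [src, #4]  / strh r3, [dst, #4]
//   ldrb r3, [src, #6]  / strb r3, [dst, #6]
// with src/dst standing in for the computed addresses.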
bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpys.
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress.  Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpys are common enough that we want to do them without a call
      // if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  }
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

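// For illustration only: the extension helper below maps each case to a
// single instruction, roughly
//   i16 -> i32: uxth/sxth rD, rS
//   i8  -> i32: uxtb/sxtb rD, rS
//   i1  -> i32: and rD, rS, #1    (zero-extension only)
// with placeholder register names.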
unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction.  If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  // ldrb r1, [r0]       ldrb r1, [r0]
  // uxtb r2, r1     =>
  // mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
      // Intentional fall-through.
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
      // Intentional fall-through.
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-iOS.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // iOS only for now, and Thumb1 is not supported.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}