1 //===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the ARM-specific support for the FastISel class. Some
11 // of the target-specific code is generated by tablegen in the file
12 // ARMGenFastISel.inc, which is #included here.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "ARM.h"
17 #include "ARMBaseInstrInfo.h"
18 #include "ARMCallingConv.h"
19 #include "ARMTargetMachine.h"
20 #include "ARMSubtarget.h"
21 #include "ARMConstantPoolValue.h"
22 #include "MCTargetDesc/ARMAddressingModes.h"
23 #include "llvm/CallingConv.h"
24 #include "llvm/DerivedTypes.h"
25 #include "llvm/GlobalVariable.h"
26 #include "llvm/Instructions.h"
27 #include "llvm/IntrinsicInst.h"
28 #include "llvm/Module.h"
29 #include "llvm/Operator.h"
30 #include "llvm/CodeGen/Analysis.h"
31 #include "llvm/CodeGen/FastISel.h"
32 #include "llvm/CodeGen/FunctionLoweringInfo.h"
33 #include "llvm/CodeGen/MachineInstrBuilder.h"
34 #include "llvm/CodeGen/MachineModuleInfo.h"
35 #include "llvm/CodeGen/MachineConstantPool.h"
36 #include "llvm/CodeGen/MachineFrameInfo.h"
37 #include "llvm/CodeGen/MachineMemOperand.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/Support/CallSite.h"
40 #include "llvm/Support/CommandLine.h"
41 #include "llvm/Support/ErrorHandling.h"
42 #include "llvm/Support/GetElementPtrTypeIterator.h"
43 #include "llvm/DataLayout.h"
44 #include "llvm/Target/TargetInstrInfo.h"
45 #include "llvm/Target/TargetLowering.h"
46 #include "llvm/Target/TargetMachine.h"
47 #include "llvm/Target/TargetOptions.h"
48 using namespace llvm;
49 
50 extern cl::opt<bool> EnableARMLongCalls;
51 
52 namespace {
53 
54   // All possible address modes, plus some.
55   typedef struct Address {
56     enum {
57       RegBase,
58       FrameIndexBase
59     } BaseType;
60 
61     union {
62       unsigned Reg;
63       int FI;
64     } Base;
65 
66     int Offset;
67 
68     // Innocuous defaults for our address.
69     Address()
70      : BaseType(RegBase), Offset(0) {
71        Base.Reg = 0;
72      }
73   } Address;
74 
75 class ARMFastISel : public FastISel {
76 
77   /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
78   /// make the right decision when generating code for different targets.
79   const ARMSubtarget *Subtarget;
80   const TargetMachine &TM;
81   const TargetInstrInfo &TII;
82   const TargetLowering &TLI;
83   ARMFunctionInfo *AFI;
84 
85   // Convenience variables to avoid some queries.
86   bool isThumb2;
87   LLVMContext *Context;
88 
89   public:
90     explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
91                          const TargetLibraryInfo *libInfo)
92     : FastISel(funcInfo, libInfo),
93       TM(funcInfo.MF->getTarget()),
94       TII(*TM.getInstrInfo()),
95       TLI(*TM.getTargetLowering()) {
96       Subtarget = &TM.getSubtarget<ARMSubtarget>();
97       AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
98       isThumb2 = AFI->isThumbFunction();
99       Context = &funcInfo.Fn->getContext();
100     }
101 
102     // Code from FastISel.cpp.
103   private:
104     unsigned FastEmitInst_(unsigned MachineInstOpcode,
105                            const TargetRegisterClass *RC);
106     unsigned FastEmitInst_r(unsigned MachineInstOpcode,
107                             const TargetRegisterClass *RC,
108                             unsigned Op0, bool Op0IsKill);
109     unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
110                              const TargetRegisterClass *RC,
111                              unsigned Op0, bool Op0IsKill,
112                              unsigned Op1, bool Op1IsKill);
113     unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
114                               const TargetRegisterClass *RC,
115                               unsigned Op0, bool Op0IsKill,
116                               unsigned Op1, bool Op1IsKill,
117                               unsigned Op2, bool Op2IsKill);
118     unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
119                              const TargetRegisterClass *RC,
120                              unsigned Op0, bool Op0IsKill,
121                              uint64_t Imm);
122     unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
123                              const TargetRegisterClass *RC,
124                              unsigned Op0, bool Op0IsKill,
125                              const ConstantFP *FPImm);
126     unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
127                               const TargetRegisterClass *RC,
128                               unsigned Op0, bool Op0IsKill,
129                               unsigned Op1, bool Op1IsKill,
130                               uint64_t Imm);
131     unsigned FastEmitInst_i(unsigned MachineInstOpcode,
132                             const TargetRegisterClass *RC,
133                             uint64_t Imm);
134     unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
135                              const TargetRegisterClass *RC,
136                              uint64_t Imm1, uint64_t Imm2);
137 
138     unsigned FastEmitInst_extractsubreg(MVT RetVT,
139                                         unsigned Op0, bool Op0IsKill,
140                                         uint32_t Idx);
141 
142     // Backend specific FastISel code.
143   private:
144     virtual bool TargetSelectInstruction(const Instruction *I);
145     virtual unsigned TargetMaterializeConstant(const Constant *C);
146     virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
147     virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
148                                const LoadInst *LI);
149   private:
150   #include "ARMGenFastISel.inc"
151 
152     // Instruction selection routines.
153   private:
154     bool SelectLoad(const Instruction *I);
155     bool SelectStore(const Instruction *I);
156     bool SelectBranch(const Instruction *I);
157     bool SelectIndirectBr(const Instruction *I);
158     bool SelectCmp(const Instruction *I);
159     bool SelectFPExt(const Instruction *I);
160     bool SelectFPTrunc(const Instruction *I);
161     bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
162     bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
163     bool SelectIToFP(const Instruction *I, bool isSigned);
164     bool SelectFPToI(const Instruction *I, bool isSigned);
165     bool SelectDiv(const Instruction *I, bool isSigned);
166     bool SelectRem(const Instruction *I, bool isSigned);
167     bool SelectCall(const Instruction *I, const char *IntrMemName);
168     bool SelectIntrinsicCall(const IntrinsicInst &I);
169     bool SelectSelect(const Instruction *I);
170     bool SelectRet(const Instruction *I);
171     bool SelectTrunc(const Instruction *I);
172     bool SelectIntExt(const Instruction *I);
173     bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);
174 
175     // Utility routines.
176   private:
177     bool isTypeLegal(Type *Ty, MVT &VT);
178     bool isLoadTypeLegal(Type *Ty, MVT &VT);
179     bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
180                     bool isZExt);
181     bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
182                      unsigned Alignment = 0, bool isZExt = true,
183                      bool allocReg = true);
184     bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
185                       unsigned Alignment = 0);
186     bool ARMComputeAddress(const Value *Obj, Address &Addr);
187     void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
188     bool ARMIsMemCpySmall(uint64_t Len);
189     bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
190     unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
191     unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
192     unsigned ARMMaterializeInt(const Constant *C, EVT VT);
193     unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
194     unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
195     unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
196     unsigned ARMSelectCallOp(bool UseReg);
197     unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, EVT VT);
198 
199     // Call handling routines.
200   private:
201     CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
202                                   bool Return,
203                                   bool isVarArg);
204     bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
205                          SmallVectorImpl<unsigned> &ArgRegs,
206                          SmallVectorImpl<MVT> &ArgVTs,
207                          SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
208                          SmallVectorImpl<unsigned> &RegArgs,
209                          CallingConv::ID CC,
210                          unsigned &NumBytes,
211                          bool isVarArg);
212     unsigned getLibcallReg(const Twine &Name);
213     bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
214                     const Instruction *I, CallingConv::ID CC,
215                     unsigned &NumBytes, bool isVarArg);
216     bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);
217 
218     // OptionalDef handling routines.
219   private:
220     bool isARMNEONPred(const MachineInstr *MI);
221     bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
222     const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
223     void AddLoadStoreOperands(EVT VT, Address &Addr,
224                               const MachineInstrBuilder &MIB,
225                               unsigned Flags, bool useAM3);
226 };
227 
228 } // end anonymous namespace
229 
230 #include "ARMGenCallingConv.inc"
231 
232 // DefinesOptionalPredicate - This is different from DefinesPredicate in that
233 // we don't care about implicit defs here, just places we'll need to add a
234 // default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
235 bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
236   if (!MI->hasOptionalDef())
237     return false;
238 
239   // Look to see if our OptionalDef is defining CPSR or CCR.
240   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
241     const MachineOperand &MO = MI->getOperand(i);
242     if (!MO.isReg() || !MO.isDef()) continue;
243     if (MO.getReg() == ARM::CPSR)
244       *CPSR = true;
245   }
246   return true;
247 }
248 
249 bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
250   const MCInstrDesc &MCID = MI->getDesc();
251 
252   // If we're a thumb2 or not NEON function we were handled via isPredicable.
253   if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
254        AFI->isThumb2Function())
255     return false;
256 
257   for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
258     if (MCID.OpInfo[i].isPredicate())
259       return true;
260 
261   return false;
262 }
263 
264 // If the machine is predicable go ahead and add the predicate operands, if
265 // it needs default CC operands add those.
266 // TODO: If we want to support thumb1 then we'll need to deal with optional
267 // CPSR defs that need to be added before the remaining operands. See s_cc_out
268 // for descriptions why.
269 const MachineInstrBuilder &
270 ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
271   MachineInstr *MI = &*MIB;
272 
273   // Do we use a predicate? or...
274   // Are we NEON in ARM mode and have a predicate operand? If so, I know
275   // we're not predicable but add it anyways.
276   if (TII.isPredicable(MI) || isARMNEONPred(MI))
277     AddDefaultPred(MIB);
278 
279   // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
280   // defines CPSR. All other OptionalDefines in ARM are the CCR register.
281   bool CPSR = false;
282   if (DefinesOptionalPredicate(MI, &CPSR)) {
283     if (CPSR)
284       AddDefaultT1CC(MIB);
285     else
286       AddDefaultCC(MIB);
287   }
288   return MIB;
289 }
290 
291 unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
292                                     const TargetRegisterClass* RC) {
293   unsigned ResultReg = createResultReg(RC);
294   const MCInstrDesc &II = TII.get(MachineInstOpcode);
295 
296   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
297   return ResultReg;
298 }
299 
300 unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
301                                      const TargetRegisterClass *RC,
302                                      unsigned Op0, bool Op0IsKill) {
303   unsigned ResultReg = createResultReg(RC);
304   const MCInstrDesc &II = TII.get(MachineInstOpcode);
305 
306   if (II.getNumDefs() >= 1) {
307     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
308                    .addReg(Op0, Op0IsKill * RegState::Kill));
309   } else {
310     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
311                    .addReg(Op0, Op0IsKill * RegState::Kill));
312     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
313                    TII.get(TargetOpcode::COPY), ResultReg)
314                    .addReg(II.ImplicitDefs[0]));
315   }
316   return ResultReg;
317 }
318 
319 unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
320                                       const TargetRegisterClass *RC,
321                                       unsigned Op0, bool Op0IsKill,
322                                       unsigned Op1, bool Op1IsKill) {
323   unsigned ResultReg = createResultReg(RC);
324   const MCInstrDesc &II = TII.get(MachineInstOpcode);
325 
326   if (II.getNumDefs() >= 1) {
327     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
328                    .addReg(Op0, Op0IsKill * RegState::Kill)
329                    .addReg(Op1, Op1IsKill * RegState::Kill));
330   } else {
331     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
332                    .addReg(Op0, Op0IsKill * RegState::Kill)
333                    .addReg(Op1, Op1IsKill * RegState::Kill));
334     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
335                            TII.get(TargetOpcode::COPY), ResultReg)
336                    .addReg(II.ImplicitDefs[0]));
337   }
338   return ResultReg;
339 }
340 
341 unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
342                                        const TargetRegisterClass *RC,
343                                        unsigned Op0, bool Op0IsKill,
344                                        unsigned Op1, bool Op1IsKill,
345                                        unsigned Op2, bool Op2IsKill) {
346   unsigned ResultReg = createResultReg(RC);
347   const MCInstrDesc &II = TII.get(MachineInstOpcode);
348 
349   if (II.getNumDefs() >= 1) {
350     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
351                    .addReg(Op0, Op0IsKill * RegState::Kill)
352                    .addReg(Op1, Op1IsKill * RegState::Kill)
353                    .addReg(Op2, Op2IsKill * RegState::Kill));
354   } else {
355     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
356                    .addReg(Op0, Op0IsKill * RegState::Kill)
357                    .addReg(Op1, Op1IsKill * RegState::Kill)
358                    .addReg(Op2, Op2IsKill * RegState::Kill));
359     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
360                            TII.get(TargetOpcode::COPY), ResultReg)
361                    .addReg(II.ImplicitDefs[0]));
362   }
363   return ResultReg;
364 }
365 
366 unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
367                                       const TargetRegisterClass *RC,
368                                       unsigned Op0, bool Op0IsKill,
369                                       uint64_t Imm) {
370   unsigned ResultReg = createResultReg(RC);
371   const MCInstrDesc &II = TII.get(MachineInstOpcode);
372 
373   if (II.getNumDefs() >= 1) {
374     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
375                    .addReg(Op0, Op0IsKill * RegState::Kill)
376                    .addImm(Imm));
377   } else {
378     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
379                    .addReg(Op0, Op0IsKill * RegState::Kill)
380                    .addImm(Imm));
381     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
382                            TII.get(TargetOpcode::COPY), ResultReg)
383                    .addReg(II.ImplicitDefs[0]));
384   }
385   return ResultReg;
386 }
387 
388 unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
389                                       const TargetRegisterClass *RC,
390                                       unsigned Op0, bool Op0IsKill,
391                                       const ConstantFP *FPImm) {
392   unsigned ResultReg = createResultReg(RC);
393   const MCInstrDesc &II = TII.get(MachineInstOpcode);
394 
395   if (II.getNumDefs() >= 1) {
396     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
397                    .addReg(Op0, Op0IsKill * RegState::Kill)
398                    .addFPImm(FPImm));
399   } else {
400     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
401                    .addReg(Op0, Op0IsKill * RegState::Kill)
402                    .addFPImm(FPImm));
403     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
404                            TII.get(TargetOpcode::COPY), ResultReg)
405                    .addReg(II.ImplicitDefs[0]));
406   }
407   return ResultReg;
408 }
409 
410 unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
411                                        const TargetRegisterClass *RC,
412                                        unsigned Op0, bool Op0IsKill,
413                                        unsigned Op1, bool Op1IsKill,
414                                        uint64_t Imm) {
415   unsigned ResultReg = createResultReg(RC);
416   const MCInstrDesc &II = TII.get(MachineInstOpcode);
417 
418   if (II.getNumDefs() >= 1) {
419     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
420                    .addReg(Op0, Op0IsKill * RegState::Kill)
421                    .addReg(Op1, Op1IsKill * RegState::Kill)
422                    .addImm(Imm));
423   } else {
424     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
425                    .addReg(Op0, Op0IsKill * RegState::Kill)
426                    .addReg(Op1, Op1IsKill * RegState::Kill)
427                    .addImm(Imm));
428     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
429                            TII.get(TargetOpcode::COPY), ResultReg)
430                    .addReg(II.ImplicitDefs[0]));
431   }
432   return ResultReg;
433 }
434 
435 unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
436                                      const TargetRegisterClass *RC,
437                                      uint64_t Imm) {
438   unsigned ResultReg = createResultReg(RC);
439   const MCInstrDesc &II = TII.get(MachineInstOpcode);
440 
441   if (II.getNumDefs() >= 1) {
442     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
443                    .addImm(Imm));
444   } else {
445     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
446                    .addImm(Imm));
447     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
448                            TII.get(TargetOpcode::COPY), ResultReg)
449                    .addReg(II.ImplicitDefs[0]));
450   }
451   return ResultReg;
452 }
453 
454 unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
455                                       const TargetRegisterClass *RC,
456                                       uint64_t Imm1, uint64_t Imm2) {
457   unsigned ResultReg = createResultReg(RC);
458   const MCInstrDesc &II = TII.get(MachineInstOpcode);
459 
460   if (II.getNumDefs() >= 1) {
461     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
462                     .addImm(Imm1).addImm(Imm2));
463   } else {
464     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
465                     .addImm(Imm1).addImm(Imm2));
466     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
467                             TII.get(TargetOpcode::COPY),
468                             ResultReg)
469                     .addReg(II.ImplicitDefs[0]));
470   }
471   return ResultReg;
472 }
473 
474 unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
475                                                  unsigned Op0, bool Op0IsKill,
476                                                  uint32_t Idx) {
477   unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
478   assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
479          "Cannot yet extract from physregs");
480 
481   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
482                           DL, TII.get(TargetOpcode::COPY), ResultReg)
483                   .addReg(Op0, getKillRegState(Op0IsKill), Idx));
484   return ResultReg;
485 }
486 
487 // TODO: Don't worry about 64-bit now, but when this is fixed remove the
488 // checks from the various callers.
489 unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
490   if (VT == MVT::f64) return 0;
491 
492   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
493   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
494                           TII.get(ARM::VMOVSR), MoveReg)
495                   .addReg(SrcReg));
496   return MoveReg;
497 }
498 
499 unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
500   if (VT == MVT::i64) return 0;
501 
502   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
503   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
504                           TII.get(ARM::VMOVRS), MoveReg)
505                   .addReg(SrcReg));
506   return MoveReg;
507 }
508 
509 // For double width floating point we need to materialize two constants
510 // (the high and the low) into integer registers then use a move to get
511 // the combined constant into an FP reg.
512 unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
513   const APFloat Val = CFP->getValueAPF();
514   bool is64bit = VT == MVT::f64;
515 
516   // This checks to see if we can use VFP3 instructions to materialize
517   // a constant, otherwise we have to go through the constant pool.
518   if (TLI.isFPImmLegal(Val, VT)) {
519     int Imm;
520     unsigned Opc;
521     if (is64bit) {
522       Imm = ARM_AM::getFP64Imm(Val);
523       Opc = ARM::FCONSTD;
524     } else {
525       Imm = ARM_AM::getFP32Imm(Val);
526       Opc = ARM::FCONSTS;
527     }
528     unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
529     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
530                             DestReg)
531                     .addImm(Imm));
532     return DestReg;
533   }
534 
535   // Require VFP2 for loading fp constants.
536   if (!Subtarget->hasVFP2()) return false;
537 
538   // MachineConstantPool wants an explicit alignment.
539   unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
540   if (Align == 0) {
541     // TODO: Figure out if this is correct.
542     Align = TD.getTypeAllocSize(CFP->getType());
543   }
544   unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
545   unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
546   unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
547 
548   // The extra reg is for addrmode5.
549   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
550                           DestReg)
551                   .addConstantPoolIndex(Idx)
552                   .addReg(0));
553   return DestReg;
554 }
555 
556 unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
557 
558   if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
559     return false;
560 
561   // If we can do this in a single instruction without a constant pool entry
562   // do so now.
563   const ConstantInt *CI = cast<ConstantInt>(C);
564   if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
565     unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
566     unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
567     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
568                             TII.get(Opc), ImmReg)
569                     .addImm(CI->getZExtValue()));
570     return ImmReg;
571   }
572 
573   // Use MVN to emit negative constants.
574   if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
575     unsigned Imm = (unsigned)~(CI->getSExtValue());
576     bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
577       (ARM_AM::getSOImmVal(Imm) != -1);
578     if (UseImm) {
579       unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
580       unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
581       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
582                               TII.get(Opc), ImmReg)
583                       .addImm(Imm));
584       return ImmReg;
585     }
586   }
587 
588   // Load from constant pool.  For now 32-bit only.
589   if (VT != MVT::i32)
590     return false;
591 
592   unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
593 
594   // MachineConstantPool wants an explicit alignment.
595   unsigned Align = TD.getPrefTypeAlignment(C->getType());
596   if (Align == 0) {
597     // TODO: Figure out if this is correct.
598     Align = TD.getTypeAllocSize(C->getType());
599   }
600   unsigned Idx = MCP.getConstantPoolIndex(C, Align);
601 
602   if (isThumb2)
603     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
604                             TII.get(ARM::t2LDRpci), DestReg)
605                     .addConstantPoolIndex(Idx));
606   else
607     // The extra immediate is for addrmode2.
608     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
609                             TII.get(ARM::LDRcp), DestReg)
610                     .addConstantPoolIndex(Idx)
611                     .addImm(0));
612 
613   return DestReg;
614 }
615 
616 unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
617   // For now 32-bit only.
618   if (VT != MVT::i32) return 0;
619 
620   Reloc::Model RelocM = TM.getRelocationModel();
621   bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
622   unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
623 
624   // Use movw+movt when possible, it avoids constant pool entries.
625   // Darwin targets don't support movt with Reloc::Static, see
626   // ARMTargetLowering::LowerGlobalAddressDarwin.  Other targets only support
627   // static movt relocations.
628   if (Subtarget->useMovt() &&
629       Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
630     unsigned Opc;
631     switch (RelocM) {
632     case Reloc::PIC_:
633       Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
634       break;
635     case Reloc::DynamicNoPIC:
636       Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
637       break;
638     default:
639       Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
640       break;
641     }
642     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
643                             DestReg).addGlobalAddress(GV));
644   } else {
645     // MachineConstantPool wants an explicit alignment.
646     unsigned Align = TD.getPrefTypeAlignment(GV->getType());
647     if (Align == 0) {
648       // TODO: Figure out if this is correct.
649       Align = TD.getTypeAllocSize(GV->getType());
650     }
651 
652     if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_)
653       return ARMLowerPICELF(GV, Align, VT);
654 
655     // Grab index.
656     unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
657       (Subtarget->isThumb() ? 4 : 8);
658     unsigned Id = AFI->createPICLabelUId();
659     ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
660                                                                 ARMCP::CPValue,
661                                                                 PCAdj);
662     unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
663 
664     // Load value.
665     MachineInstrBuilder MIB;
666     if (isThumb2) {
667       unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
668       MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
669         .addConstantPoolIndex(Idx);
670       if (RelocM == Reloc::PIC_)
671         MIB.addImm(Id);
672       AddOptionalDefs(MIB);
673     } else {
674       // The extra immediate is for addrmode2.
675       MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
676                     DestReg)
677         .addConstantPoolIndex(Idx)
678         .addImm(0);
679       AddOptionalDefs(MIB);
680 
681       if (RelocM == Reloc::PIC_) {
682         unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
683         unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
684 
685         MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
686                                           DL, TII.get(Opc), NewDestReg)
687                                   .addReg(DestReg)
688                                   .addImm(Id);
689         AddOptionalDefs(MIB);
690         return NewDestReg;
691       }
692     }
693   }
694 
695   if (IsIndirect) {
696     MachineInstrBuilder MIB;
697     unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
698     if (isThumb2)
699       MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
700                     TII.get(ARM::t2LDRi12), NewDestReg)
701             .addReg(DestReg)
702             .addImm(0);
703     else
704       MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
705                     NewDestReg)
706             .addReg(DestReg)
707             .addImm(0);
708     DestReg = NewDestReg;
709     AddOptionalDefs(MIB);
710   }
711 
712   return DestReg;
713 }
714 
715 unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
716   EVT VT = TLI.getValueType(C->getType(), true);
717 
718   // Only handle simple types.
719   if (!VT.isSimple()) return 0;
720 
721   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
722     return ARMMaterializeFP(CFP, VT);
723   else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
724     return ARMMaterializeGV(GV, VT);
725   else if (isa<ConstantInt>(C))
726     return ARMMaterializeInt(C, VT);
727 
728   return 0;
729 }
730 
731 // TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);
732 
733 unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
734   // Don't handle dynamic allocas.
735   if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
736 
737   MVT VT;
738   if (!isLoadTypeLegal(AI->getType(), VT)) return 0;
739 
740   DenseMap<const AllocaInst*, int>::iterator SI =
741     FuncInfo.StaticAllocaMap.find(AI);
742 
743   // This will get lowered later into the correct offsets and registers
744   // via rewriteXFrameIndex.
745   if (SI != FuncInfo.StaticAllocaMap.end()) {
746     const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
747     unsigned ResultReg = createResultReg(RC);
748     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
749     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
750                             TII.get(Opc), ResultReg)
751                             .addFrameIndex(SI->second)
752                             .addImm(0));
753     return ResultReg;
754   }
755 
756   return 0;
757 }
758 
759 bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
760   EVT evt = TLI.getValueType(Ty, true);
761 
762   // Only handle simple types.
763   if (evt == MVT::Other || !evt.isSimple()) return false;
764   VT = evt.getSimpleVT();
765 
766   // Handle all legal types, i.e. a register that will directly hold this
767   // value.
768   return TLI.isTypeLegal(VT);
769 }
770 
771 bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
772   if (isTypeLegal(Ty, VT)) return true;
773 
774   // If this is a type than can be sign or zero-extended to a basic operation
775   // go ahead and accept it now.
776   if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
777     return true;
778 
779   return false;
780 }
781 
782 // Computes the address to get to an object.
783 bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
784   // Some boilerplate from the X86 FastISel.
785   const User *U = NULL;
786   unsigned Opcode = Instruction::UserOp1;
787   if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
788     // Don't walk into other basic blocks unless the object is an alloca from
789     // another block, otherwise it may not have a virtual register assigned.
790     if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
791         FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
792       Opcode = I->getOpcode();
793       U = I;
794     }
795   } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
796     Opcode = C->getOpcode();
797     U = C;
798   }
799 
800   if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
801     if (Ty->getAddressSpace() > 255)
802       // Fast instruction selection doesn't support the special
803       // address spaces.
804       return false;
805 
806   switch (Opcode) {
807     default:
808     break;
809     case Instruction::BitCast: {
810       // Look through bitcasts.
811       return ARMComputeAddress(U->getOperand(0), Addr);
812     }
813     case Instruction::IntToPtr: {
814       // Look past no-op inttoptrs.
815       if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
816         return ARMComputeAddress(U->getOperand(0), Addr);
817       break;
818     }
819     case Instruction::PtrToInt: {
820       // Look past no-op ptrtoints.
821       if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
822         return ARMComputeAddress(U->getOperand(0), Addr);
823       break;
824     }
825     case Instruction::GetElementPtr: {
826       Address SavedAddr = Addr;
827       int TmpOffset = Addr.Offset;
828 
829       // Iterate through the GEP folding the constants into offsets where
830       // we can.
831       gep_type_iterator GTI = gep_type_begin(U);
832       for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
833            i != e; ++i, ++GTI) {
834         const Value *Op = *i;
835         if (StructType *STy = dyn_cast<StructType>(*GTI)) {
836           const StructLayout *SL = TD.getStructLayout(STy);
837           unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
838           TmpOffset += SL->getElementOffset(Idx);
839         } else {
840           uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
841           for (;;) {
842             if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
843               // Constant-offset addressing.
844               TmpOffset += CI->getSExtValue() * S;
845               break;
846             }
847             if (isa<AddOperator>(Op) &&
848                 (!isa<Instruction>(Op) ||
849                  FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
850                  == FuncInfo.MBB) &&
851                 isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
852               // An add (in the same block) with a constant operand. Fold the
853               // constant.
854               ConstantInt *CI =
855               cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
856               TmpOffset += CI->getSExtValue() * S;
857               // Iterate on the other operand.
858               Op = cast<AddOperator>(Op)->getOperand(0);
859               continue;
860             }
861             // Unsupported
862             goto unsupported_gep;
863           }
864         }
865       }
866 
867       // Try to grab the base operand now.
868       Addr.Offset = TmpOffset;
869       if (ARMComputeAddress(U->getOperand(0), Addr)) return true;
870 
871       // We failed, restore everything and try the other options.
872       Addr = SavedAddr;
873 
874       unsupported_gep:
875       break;
876     }
877     case Instruction::Alloca: {
878       const AllocaInst *AI = cast<AllocaInst>(Obj);
879       DenseMap<const AllocaInst*, int>::iterator SI =
880         FuncInfo.StaticAllocaMap.find(AI);
881       if (SI != FuncInfo.StaticAllocaMap.end()) {
882         Addr.BaseType = Address::FrameIndexBase;
883         Addr.Base.FI = SI->second;
884         return true;
885       }
886       break;
887     }
888   }
889 
890   // Try to get this in a register if nothing else has worked.
891   if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
892   return Addr.Base.Reg != 0;
893 }
894 
895 void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
896 
897   assert(VT.isSimple() && "Non-simple types are invalid here!");
898 
899   bool needsLowering = false;
900   switch (VT.getSimpleVT().SimpleTy) {
901     default: llvm_unreachable("Unhandled load/store type!");
902     case MVT::i1:
903     case MVT::i8:
904     case MVT::i16:
905     case MVT::i32:
906       if (!useAM3) {
907         // Integer loads/stores handle 12-bit offsets.
908         needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
909         // Handle negative offsets.
910         if (needsLowering && isThumb2)
911           needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
912                             Addr.Offset > -256);
913       } else {
914         // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
915         needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
916       }
917       break;
918     case MVT::f32:
919     case MVT::f64:
920       // Floating point operands handle 8-bit offsets.
921       needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
922       break;
923   }
924 
925   // If this is a stack pointer and the offset needs to be simplified then
926   // put the alloca address into a register, set the base type back to
927   // register and continue. This should almost never happen.
928   if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
929     const TargetRegisterClass *RC = isThumb2 ?
930       (const TargetRegisterClass*)&ARM::tGPRRegClass :
931       (const TargetRegisterClass*)&ARM::GPRRegClass;
932     unsigned ResultReg = createResultReg(RC);
933     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
934     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
935                             TII.get(Opc), ResultReg)
936                             .addFrameIndex(Addr.Base.FI)
937                             .addImm(0));
938     Addr.Base.Reg = ResultReg;
939     Addr.BaseType = Address::RegBase;
940   }
941 
942   // Since the offset is too large for the load/store instruction
943   // get the reg+offset into a register.
944   if (needsLowering) {
945     Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
946                                  /*Op0IsKill*/false, Addr.Offset, MVT::i32);
947     Addr.Offset = 0;
948   }
949 }
950 
951 void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
952                                        const MachineInstrBuilder &MIB,
953                                        unsigned Flags, bool useAM3) {
954   // addrmode5 output depends on the selection dag addressing dividing the
955   // offset by 4 that it then later multiplies. Do this here as well.
956   if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
957       VT.getSimpleVT().SimpleTy == MVT::f64)
958     Addr.Offset /= 4;
959 
960   // Frame base works a bit differently. Handle it separately.
961   if (Addr.BaseType == Address::FrameIndexBase) {
962     int FI = Addr.Base.FI;
963     int Offset = Addr.Offset;
964     MachineMemOperand *MMO =
965           FuncInfo.MF->getMachineMemOperand(
966                                   MachinePointerInfo::getFixedStack(FI, Offset),
967                                   Flags,
968                                   MFI.getObjectSize(FI),
969                                   MFI.getObjectAlignment(FI));
970     // Now add the rest of the operands.
971     MIB.addFrameIndex(FI);
972 
973     // ARM halfword load/stores and signed byte loads need an additional
974     // operand.
975     if (useAM3) {
976       signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
977       MIB.addReg(0);
978       MIB.addImm(Imm);
979     } else {
980       MIB.addImm(Addr.Offset);
981     }
982     MIB.addMemOperand(MMO);
983   } else {
984     // Now add the rest of the operands.
985     MIB.addReg(Addr.Base.Reg);
986 
987     // ARM halfword load/stores and signed byte loads need an additional
988     // operand.
989     if (useAM3) {
990       signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
991       MIB.addReg(0);
992       MIB.addImm(Imm);
993     } else {
994       MIB.addImm(Addr.Offset);
995     }
996   }
997   AddOptionalDefs(MIB);
998 }
999 
1000 bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
1001                               unsigned Alignment, bool isZExt, bool allocReg) {
1002   assert(VT.isSimple() && "Non-simple types are invalid here!");
1003   unsigned Opc;
1004   bool useAM3 = false;
1005   bool needVMOV = false;
1006   const TargetRegisterClass *RC;
1007   switch (VT.getSimpleVT().SimpleTy) {
1008     // This is mostly going to be Neon/vector support.
1009     default: return false;
1010     case MVT::i1:
1011     case MVT::i8:
1012       if (isThumb2) {
1013         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1014           Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
1015         else
1016           Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
1017       } else {
1018         if (isZExt) {
1019           Opc = ARM::LDRBi12;
1020         } else {
1021           Opc = ARM::LDRSB;
1022           useAM3 = true;
1023         }
1024       }
1025       RC = &ARM::GPRRegClass;
1026       break;
1027     case MVT::i16:
1028       if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
1029         return false;
1030 
1031       if (isThumb2) {
1032         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1033           Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
1034         else
1035           Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
1036       } else {
1037         Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
1038         useAM3 = true;
1039       }
1040       RC = &ARM::GPRRegClass;
1041       break;
1042     case MVT::i32:
1043       if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
1044         return false;
1045 
1046       if (isThumb2) {
1047         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1048           Opc = ARM::t2LDRi8;
1049         else
1050           Opc = ARM::t2LDRi12;
1051       } else {
1052         Opc = ARM::LDRi12;
1053       }
1054       RC = &ARM::GPRRegClass;
1055       break;
1056     case MVT::f32:
1057       if (!Subtarget->hasVFP2()) return false;
1058       // Unaligned loads need special handling. Floats require word-alignment.
1059       if (Alignment && Alignment < 4) {
1060         needVMOV = true;
1061         VT = MVT::i32;
1062         Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
1063         RC = &ARM::GPRRegClass;
1064       } else {
1065         Opc = ARM::VLDRS;
1066         RC = TLI.getRegClassFor(VT);
1067       }
1068       break;
1069     case MVT::f64:
1070       if (!Subtarget->hasVFP2()) return false;
1071       // FIXME: Unaligned loads need special handling.  Doublewords require
1072       // word-alignment.
1073       if (Alignment && Alignment < 4)
1074         return false;
1075 
1076       Opc = ARM::VLDRD;
1077       RC = TLI.getRegClassFor(VT);
1078       break;
1079   }
1080   // Simplify this down to something we can handle.
1081   ARMSimplifyAddress(Addr, VT, useAM3);
1082 
1083   // Create the base instruction, then add the operands.
1084   if (allocReg)
1085     ResultReg = createResultReg(RC);
1086   assert (ResultReg > 255 && "Expected an allocated virtual register.");
1087   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1088                                     TII.get(Opc), ResultReg);
1089   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
1090 
1091   // If we had an unaligned load of a float we've converted it to an regular
1092   // load.  Now we must move from the GRP to the FP register.
1093   if (needVMOV) {
1094     unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1095     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1096                             TII.get(ARM::VMOVSR), MoveReg)
1097                     .addReg(ResultReg));
1098     ResultReg = MoveReg;
1099   }
1100   return true;
1101 }
1102 
1103 bool ARMFastISel::SelectLoad(const Instruction *I) {
1104   // Atomic loads need special handling.
1105   if (cast<LoadInst>(I)->isAtomic())
1106     return false;
1107 
1108   // Verify we have a legal type before going any further.
1109   MVT VT;
1110   if (!isLoadTypeLegal(I->getType(), VT))
1111     return false;
1112 
1113   // See if we can handle this address.
1114   Address Addr;
1115   if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
1116 
1117   unsigned ResultReg;
1118   if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
1119     return false;
1120   UpdateValueMap(I, ResultReg);
1121   return true;
1122 }
1123 
1124 bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
1125                                unsigned Alignment) {
1126   unsigned StrOpc;
1127   bool useAM3 = false;
1128   switch (VT.getSimpleVT().SimpleTy) {
1129     // This is mostly going to be Neon/vector support.
1130     default: return false;
1131     case MVT::i1: {
1132       unsigned Res = createResultReg(isThumb2 ?
1133         (const TargetRegisterClass*)&ARM::tGPRRegClass :
1134         (const TargetRegisterClass*)&ARM::GPRRegClass);
1135       unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
1136       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1137                               TII.get(Opc), Res)
1138                       .addReg(SrcReg).addImm(1));
1139       SrcReg = Res;
1140     } // Fallthrough here.
1141     case MVT::i8:
1142       if (isThumb2) {
1143         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1144           StrOpc = ARM::t2STRBi8;
1145         else
1146           StrOpc = ARM::t2STRBi12;
1147       } else {
1148         StrOpc = ARM::STRBi12;
1149       }
1150       break;
1151     case MVT::i16:
1152       if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
1153         return false;
1154 
1155       if (isThumb2) {
1156         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1157           StrOpc = ARM::t2STRHi8;
1158         else
1159           StrOpc = ARM::t2STRHi12;
1160       } else {
1161         StrOpc = ARM::STRH;
1162         useAM3 = true;
1163       }
1164       break;
1165     case MVT::i32:
1166       if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
1167         return false;
1168 
1169       if (isThumb2) {
1170         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1171           StrOpc = ARM::t2STRi8;
1172         else
1173           StrOpc = ARM::t2STRi12;
1174       } else {
1175         StrOpc = ARM::STRi12;
1176       }
1177       break;
1178     case MVT::f32:
1179       if (!Subtarget->hasVFP2()) return false;
1180       // Unaligned stores need special handling. Floats require word-alignment.
1181       if (Alignment && Alignment < 4) {
1182         unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
1183         AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1184                                 TII.get(ARM::VMOVRS), MoveReg)
1185                         .addReg(SrcReg));
1186         SrcReg = MoveReg;
1187         VT = MVT::i32;
1188         StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
1189       } else {
1190         StrOpc = ARM::VSTRS;
1191       }
1192       break;
1193     case MVT::f64:
1194       if (!Subtarget->hasVFP2()) return false;
1195       // FIXME: Unaligned stores need special handling.  Doublewords require
1196       // word-alignment.
1197       if (Alignment && Alignment < 4)
1198           return false;
1199 
1200       StrOpc = ARM::VSTRD;
1201       break;
1202   }
1203   // Simplify this down to something we can handle.
1204   ARMSimplifyAddress(Addr, VT, useAM3);
1205 
1206   // Create the base instruction, then add the operands.
1207   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1208                                     TII.get(StrOpc))
1209                             .addReg(SrcReg);
1210   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
1211   return true;
1212 }
1213 
1214 bool ARMFastISel::SelectStore(const Instruction *I) {
1215   Value *Op0 = I->getOperand(0);
1216   unsigned SrcReg = 0;
1217 
1218   // Atomic stores need special handling.
1219   if (cast<StoreInst>(I)->isAtomic())
1220     return false;
1221 
1222   // Verify we have a legal type before going any further.
1223   MVT VT;
1224   if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
1225     return false;
1226 
1227   // Get the value to be stored into a register.
1228   SrcReg = getRegForValue(Op0);
1229   if (SrcReg == 0) return false;
1230 
1231   // See if we can handle this address.
1232   Address Addr;
1233   if (!ARMComputeAddress(I->getOperand(1), Addr))
1234     return false;
1235 
1236   if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
1237     return false;
1238   return true;
1239 }
1240 
1241 static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
1242   switch (Pred) {
1243     // Needs two compares...
1244     case CmpInst::FCMP_ONE:
1245     case CmpInst::FCMP_UEQ:
1246     default:
1247       // AL is our "false" for now. The other two need more compares.
1248       return ARMCC::AL;
1249     case CmpInst::ICMP_EQ:
1250     case CmpInst::FCMP_OEQ:
1251       return ARMCC::EQ;
1252     case CmpInst::ICMP_SGT:
1253     case CmpInst::FCMP_OGT:
1254       return ARMCC::GT;
1255     case CmpInst::ICMP_SGE:
1256     case CmpInst::FCMP_OGE:
1257       return ARMCC::GE;
1258     case CmpInst::ICMP_UGT:
1259     case CmpInst::FCMP_UGT:
1260       return ARMCC::HI;
1261     case CmpInst::FCMP_OLT:
1262       return ARMCC::MI;
1263     case CmpInst::ICMP_ULE:
1264     case CmpInst::FCMP_OLE:
1265       return ARMCC::LS;
1266     case CmpInst::FCMP_ORD:
1267       return ARMCC::VC;
1268     case CmpInst::FCMP_UNO:
1269       return ARMCC::VS;
1270     case CmpInst::FCMP_UGE:
1271       return ARMCC::PL;
1272     case CmpInst::ICMP_SLT:
1273     case CmpInst::FCMP_ULT:
1274       return ARMCC::LT;
1275     case CmpInst::ICMP_SLE:
1276     case CmpInst::FCMP_ULE:
1277       return ARMCC::LE;
1278     case CmpInst::FCMP_UNE:
1279     case CmpInst::ICMP_NE:
1280       return ARMCC::NE;
1281     case CmpInst::ICMP_UGE:
1282       return ARMCC::HS;
1283     case CmpInst::ICMP_ULT:
1284       return ARMCC::LO;
1285   }
1286 }
1287 
1288 bool ARMFastISel::SelectBranch(const Instruction *I) {
1289   const BranchInst *BI = cast<BranchInst>(I);
1290   MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
1291   MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
1292 
1293   // Simple branch support.
1294 
1295   // If we can, avoid recomputing the compare - redoing it could lead to wonky
1296   // behavior.
1297   if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
1298     if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
1299 
1300       // Get the compare predicate.
1301       // Try to take advantage of fallthrough opportunities.
1302       CmpInst::Predicate Predicate = CI->getPredicate();
1303       if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1304         std::swap(TBB, FBB);
1305         Predicate = CmpInst::getInversePredicate(Predicate);
1306       }
1307 
1308       ARMCC::CondCodes ARMPred = getComparePred(Predicate);
1309 
1310       // We may not handle every CC for now.
1311       if (ARMPred == ARMCC::AL) return false;
1312 
1313       // Emit the compare.
1314       if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
1315         return false;
1316 
1317       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1318       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
1319       .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
1320       FastEmitBranch(FBB, DL);
1321       FuncInfo.MBB->addSuccessor(TBB);
1322       return true;
1323     }
1324   } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1325     MVT SourceVT;
1326     if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
1327         (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
1328       unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1329       unsigned OpReg = getRegForValue(TI->getOperand(0));
1330       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1331                               TII.get(TstOpc))
1332                       .addReg(OpReg).addImm(1));
1333 
1334       unsigned CCMode = ARMCC::NE;
1335       if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1336         std::swap(TBB, FBB);
1337         CCMode = ARMCC::EQ;
1338       }
1339 
1340       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1341       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
1342       .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1343 
1344       FastEmitBranch(FBB, DL);
1345       FuncInfo.MBB->addSuccessor(TBB);
1346       return true;
1347     }
1348   } else if (const ConstantInt *CI =
1349              dyn_cast<ConstantInt>(BI->getCondition())) {
1350     uint64_t Imm = CI->getZExtValue();
1351     MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
1352     FastEmitBranch(Target, DL);
1353     return true;
1354   }
1355 
1356   unsigned CmpReg = getRegForValue(BI->getCondition());
1357   if (CmpReg == 0) return false;
1358 
1359   // We've been divorced from our compare!  Our block was split, and
1360   // now our compare lives in a predecessor block.  We musn't
1361   // re-compare here, as the children of the compare aren't guaranteed
1362   // live across the block boundary (we *could* check for this).
1363   // Regardless, the compare has been done in the predecessor block,
1364   // and it left a value for us in a virtual register.  Ergo, we test
1365   // the one-bit value left in the virtual register.
1366   unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1367   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
1368                   .addReg(CmpReg).addImm(1));
1369 
1370   unsigned CCMode = ARMCC::NE;
1371   if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1372     std::swap(TBB, FBB);
1373     CCMode = ARMCC::EQ;
1374   }
1375 
1376   unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1377   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
1378                   .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1379   FastEmitBranch(FBB, DL);
1380   FuncInfo.MBB->addSuccessor(TBB);
1381   return true;
1382 }
1383 
1384 bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
1385   unsigned AddrReg = getRegForValue(I->getOperand(0));
1386   if (AddrReg == 0) return false;
1387 
1388   unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
1389   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
1390                   .addReg(AddrReg));
1391 
1392   const IndirectBrInst *IB = cast<IndirectBrInst>(I);
1393   for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
1394     FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);
1395 
1396   return true;
1397 }
1398 
1399 bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
1400                              bool isZExt) {
1401   Type *Ty = Src1Value->getType();
1402   EVT SrcVT = TLI.getValueType(Ty, true);
1403   if (!SrcVT.isSimple()) return false;
1404 
1405   bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
1406   if (isFloat && !Subtarget->hasVFP2())
1407     return false;
1408 
1409   // Check to see if the 2nd operand is a constant that we can encode directly
1410   // in the compare.
1411   int Imm = 0;
1412   bool UseImm = false;
1413   bool isNegativeImm = false;
1414   // FIXME: At -O0 we don't have anything that canonicalizes operand order.
1415   // Thus, Src1Value may be a ConstantInt, but we're missing it.
1416   if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
1417     if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
1418         SrcVT == MVT::i1) {
1419       const APInt &CIVal = ConstInt->getValue();
1420       Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
1421       // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
1422       // then a cmn, because there is no way to represent 2147483648 as a
1423       // signed 32-bit int.
1424       if (Imm < 0 && Imm != (int)0x80000000) {
1425         isNegativeImm = true;
1426         Imm = -Imm;
1427       }
1428       UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1429         (ARM_AM::getSOImmVal(Imm) != -1);
1430     }
1431   } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
1432     if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
1433       if (ConstFP->isZero() && !ConstFP->isNegative())
1434         UseImm = true;
1435   }
1436 
1437   unsigned CmpOpc;
1438   bool isICmp = true;
1439   bool needsExt = false;
1440   switch (SrcVT.getSimpleVT().SimpleTy) {
1441     default: return false;
1442     // TODO: Verify compares.
1443     case MVT::f32:
1444       isICmp = false;
1445       CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
1446       break;
1447     case MVT::f64:
1448       isICmp = false;
1449       CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
1450       break;
1451     case MVT::i1:
1452     case MVT::i8:
1453     case MVT::i16:
1454       needsExt = true;
1455     // Intentional fall-through.
1456     case MVT::i32:
1457       if (isThumb2) {
1458         if (!UseImm)
1459           CmpOpc = ARM::t2CMPrr;
1460         else
1461           CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
1462       } else {
1463         if (!UseImm)
1464           CmpOpc = ARM::CMPrr;
1465         else
1466           CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
1467       }
1468       break;
1469   }
1470 
1471   unsigned SrcReg1 = getRegForValue(Src1Value);
1472   if (SrcReg1 == 0) return false;
1473 
1474   unsigned SrcReg2 = 0;
1475   if (!UseImm) {
1476     SrcReg2 = getRegForValue(Src2Value);
1477     if (SrcReg2 == 0) return false;
1478   }
1479 
1480   // We have i1, i8, or i16, we need to either zero extend or sign extend.
1481   if (needsExt) {
1482     SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
1483     if (SrcReg1 == 0) return false;
1484     if (!UseImm) {
1485       SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
1486       if (SrcReg2 == 0) return false;
1487     }
1488   }
1489 
1490   if (!UseImm) {
1491     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1492                             TII.get(CmpOpc))
1493                     .addReg(SrcReg1).addReg(SrcReg2));
1494   } else {
1495     MachineInstrBuilder MIB;
1496     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
1497       .addReg(SrcReg1);
1498 
1499     // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
1500     if (isICmp)
1501       MIB.addImm(Imm);
1502     AddOptionalDefs(MIB);
1503   }
1504 
1505   // For floating point we need to move the result to a comparison register
1506   // that we can then use for branches.
1507   if (Ty->isFloatTy() || Ty->isDoubleTy())
1508     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1509                             TII.get(ARM::FMSTAT)));
1510   return true;
1511 }
1512 
1513 bool ARMFastISel::SelectCmp(const Instruction *I) {
1514   const CmpInst *CI = cast<CmpInst>(I);
1515 
1516   // Get the compare predicate.
1517   ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
1518 
1519   // We may not handle every CC for now.
1520   if (ARMPred == ARMCC::AL) return false;
1521 
1522   // Emit the compare.
1523   if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
1524     return false;
1525 
1526   // Now set a register based on the comparison. Explicitly set the predicates
1527   // here.
1528   unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1529   const TargetRegisterClass *RC = isThumb2 ?
1530     (const TargetRegisterClass*)&ARM::rGPRRegClass :
1531     (const TargetRegisterClass*)&ARM::GPRRegClass;
1532   unsigned DestReg = createResultReg(RC);
1533   Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
1534   unsigned ZeroReg = TargetMaterializeConstant(Zero);
1535   // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
1536   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
1537           .addReg(ZeroReg).addImm(1)
1538           .addImm(ARMPred).addReg(ARM::CPSR);
1539 
1540   UpdateValueMap(I, DestReg);
1541   return true;
1542 }
1543 
1544 bool ARMFastISel::SelectFPExt(const Instruction *I) {
1545   // Make sure we have VFP and that we're extending float to double.
1546   if (!Subtarget->hasVFP2()) return false;
1547 
1548   Value *V = I->getOperand(0);
1549   if (!I->getType()->isDoubleTy() ||
1550       !V->getType()->isFloatTy()) return false;
1551 
1552   unsigned Op = getRegForValue(V);
1553   if (Op == 0) return false;
1554 
1555   unsigned Result = createResultReg(&ARM::DPRRegClass);
1556   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1557                           TII.get(ARM::VCVTDS), Result)
1558                   .addReg(Op));
1559   UpdateValueMap(I, Result);
1560   return true;
1561 }
1562 
1563 bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
1564   // Make sure we have VFP and that we're truncating double to float.
1565   if (!Subtarget->hasVFP2()) return false;
1566 
1567   Value *V = I->getOperand(0);
1568   if (!(I->getType()->isFloatTy() &&
1569         V->getType()->isDoubleTy())) return false;
1570 
1571   unsigned Op = getRegForValue(V);
1572   if (Op == 0) return false;
1573 
1574   unsigned Result = createResultReg(&ARM::SPRRegClass);
1575   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1576                           TII.get(ARM::VCVTSD), Result)
1577                   .addReg(Op));
1578   UpdateValueMap(I, Result);
1579   return true;
1580 }
1581 
1582 bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
1583   // Make sure we have VFP.
1584   if (!Subtarget->hasVFP2()) return false;
1585 
1586   MVT DstVT;
1587   Type *Ty = I->getType();
1588   if (!isTypeLegal(Ty, DstVT))
1589     return false;
1590 
1591   Value *Src = I->getOperand(0);
1592   EVT SrcVT = TLI.getValueType(Src->getType(), true);
1593   if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1594     return false;
1595 
1596   unsigned SrcReg = getRegForValue(Src);
1597   if (SrcReg == 0) return false;
1598 
1599   // Handle sign-extension.
1600   if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
1601     EVT DestVT = MVT::i32;
1602     SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
1603                                        /*isZExt*/!isSigned);
1604     if (SrcReg == 0) return false;
1605   }
1606 
1607   // The conversion routine works on fp-reg to fp-reg and the operand above
1608   // was an integer, move it to the fp registers if possible.
1609   unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
1610   if (FP == 0) return false;
1611 
1612   unsigned Opc;
1613   if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
1614   else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
1615   else return false;
1616 
1617   unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
1618   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
1619                           ResultReg)
1620                   .addReg(FP));
1621   UpdateValueMap(I, ResultReg);
1622   return true;
1623 }
1624 
1625 bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
1626   // Make sure we have VFP.
1627   if (!Subtarget->hasVFP2()) return false;
1628 
1629   MVT DstVT;
1630   Type *RetTy = I->getType();
1631   if (!isTypeLegal(RetTy, DstVT))
1632     return false;
1633 
1634   unsigned Op = getRegForValue(I->getOperand(0));
1635   if (Op == 0) return false;
1636 
1637   unsigned Opc;
1638   Type *OpTy = I->getOperand(0)->getType();
1639   if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
1640   else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
1641   else return false;
1642 
1643   // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
1644   unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1645   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
1646                           ResultReg)
1647                   .addReg(Op));
1648 
1649   // This result needs to be in an integer register, but the conversion only
1650   // takes place in fp-regs.
1651   unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
1652   if (IntReg == 0) return false;
1653 
1654   UpdateValueMap(I, IntReg);
1655   return true;
1656 }
1657 
1658 bool ARMFastISel::SelectSelect(const Instruction *I) {
1659   MVT VT;
1660   if (!isTypeLegal(I->getType(), VT))
1661     return false;
1662 
1663   // Things need to be register sized for register moves.
1664   if (VT != MVT::i32) return false;
1665   const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
1666 
1667   unsigned CondReg = getRegForValue(I->getOperand(0));
1668   if (CondReg == 0) return false;
1669   unsigned Op1Reg = getRegForValue(I->getOperand(1));
1670   if (Op1Reg == 0) return false;
1671 
1672   // Check to see if we can use an immediate in the conditional move.
1673   int Imm = 0;
1674   bool UseImm = false;
1675   bool isNegativeImm = false;
1676   if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
1677     assert (VT == MVT::i32 && "Expecting an i32.");
1678     Imm = (int)ConstInt->getValue().getZExtValue();
1679     if (Imm < 0) {
1680       isNegativeImm = true;
1681       Imm = ~Imm;
1682     }
1683     UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1684       (ARM_AM::getSOImmVal(Imm) != -1);
1685   }
1686 
1687   unsigned Op2Reg = 0;
1688   if (!UseImm) {
1689     Op2Reg = getRegForValue(I->getOperand(2));
1690     if (Op2Reg == 0) return false;
1691   }
1692 
1693   unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
1694   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
1695                   .addReg(CondReg).addImm(0));
1696 
1697   unsigned MovCCOpc;
1698   if (!UseImm) {
1699     MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1700   } else {
1701     if (!isNegativeImm) {
1702       MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1703     } else {
1704       MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
1705     }
1706   }
1707   unsigned ResultReg = createResultReg(RC);
1708   if (!UseImm)
1709     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
1710     .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
1711   else
1712     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
1713     .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
1714   UpdateValueMap(I, ResultReg);
1715   return true;
1716 }
1717 
1718 bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
1719   MVT VT;
1720   Type *Ty = I->getType();
1721   if (!isTypeLegal(Ty, VT))
1722     return false;
1723 
1724   // If we have integer div support we should have selected this automagically.
1725   // In case we have a real miss go ahead and return false and we'll pick
1726   // it up later.
1727   if (Subtarget->hasDivide()) return false;
1728 
1729   // Otherwise emit a libcall.
1730   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1731   if (VT == MVT::i8)
1732     LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
1733   else if (VT == MVT::i16)
1734     LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
1735   else if (VT == MVT::i32)
1736     LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
1737   else if (VT == MVT::i64)
1738     LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1739   else if (VT == MVT::i128)
1740     LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1741   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
1742 
1743   return ARMEmitLibcall(I, LC);
1744 }
1745 
1746 bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
1747   MVT VT;
1748   Type *Ty = I->getType();
1749   if (!isTypeLegal(Ty, VT))
1750     return false;
1751 
1752   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1753   if (VT == MVT::i8)
1754     LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1755   else if (VT == MVT::i16)
1756     LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1757   else if (VT == MVT::i32)
1758     LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1759   else if (VT == MVT::i64)
1760     LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1761   else if (VT == MVT::i128)
1762     LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1763   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
1764 
1765   return ARMEmitLibcall(I, LC);
1766 }
1767 
1768 bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
1769   EVT DestVT  = TLI.getValueType(I->getType(), true);
1770 
1771   // We can get here in the case when we have a binary operation on a non-legal
1772   // type and the target independent selector doesn't know how to handle it.
1773   if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1774     return false;
1775 
1776   unsigned Opc;
1777   switch (ISDOpcode) {
1778     default: return false;
1779     case ISD::ADD:
1780       Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1781       break;
1782     case ISD::OR:
1783       Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1784       break;
1785     case ISD::SUB:
1786       Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1787       break;
1788   }
1789 
1790   unsigned SrcReg1 = getRegForValue(I->getOperand(0));
1791   if (SrcReg1 == 0) return false;
1792 
1793   // TODO: Often the 2nd operand is an immediate, which can be encoded directly
1794   // in the instruction, rather then materializing the value in a register.
1795   unsigned SrcReg2 = getRegForValue(I->getOperand(1));
1796   if (SrcReg2 == 0) return false;
1797 
1798   unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
1799   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1800                           TII.get(Opc), ResultReg)
1801                   .addReg(SrcReg1).addReg(SrcReg2));
1802   UpdateValueMap(I, ResultReg);
1803   return true;
1804 }
1805 
1806 bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
1807   EVT VT  = TLI.getValueType(I->getType(), true);
1808 
1809   // We can get here in the case when we want to use NEON for our fp
1810   // operations, but can't figure out how to. Just use the vfp instructions
1811   // if we have them.
1812   // FIXME: It'd be nice to use NEON instructions.
1813   Type *Ty = I->getType();
1814   bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
1815   if (isFloat && !Subtarget->hasVFP2())
1816     return false;
1817 
1818   unsigned Opc;
1819   bool is64bit = VT == MVT::f64 || VT == MVT::i64;
1820   switch (ISDOpcode) {
1821     default: return false;
1822     case ISD::FADD:
1823       Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1824       break;
1825     case ISD::FSUB:
1826       Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1827       break;
1828     case ISD::FMUL:
1829       Opc = is64bit ? ARM::VMULD : ARM::VMULS;
1830       break;
1831   }
1832   unsigned Op1 = getRegForValue(I->getOperand(0));
1833   if (Op1 == 0) return false;
1834 
1835   unsigned Op2 = getRegForValue(I->getOperand(1));
1836   if (Op2 == 0) return false;
1837 
1838   unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
1839   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1840                           TII.get(Opc), ResultReg)
1841                   .addReg(Op1).addReg(Op2));
1842   UpdateValueMap(I, ResultReg);
1843   return true;
1844 }
1845 
1846 // Call Handling Code
1847 
1848 // This is largely taken directly from CCAssignFnForNode
1849 // TODO: We may not support all of this.
1850 CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
1851                                            bool Return,
1852                                            bool isVarArg) {
1853   switch (CC) {
1854   default:
1855     llvm_unreachable("Unsupported calling convention");
1856   case CallingConv::Fast:
1857     if (Subtarget->hasVFP2() && !isVarArg) {
1858       if (!Subtarget->isAAPCS_ABI())
1859         return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1860       // For AAPCS ABI targets, just use VFP variant of the calling convention.
1861       return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1862     }
1863     // Fallthrough
1864   case CallingConv::C:
1865     // Use target triple & subtarget features to do actual dispatch.
1866     if (Subtarget->isAAPCS_ABI()) {
1867       if (Subtarget->hasVFP2() &&
1868           TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
1869         return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1870       else
1871         return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1872     } else
1873         return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1874   case CallingConv::ARM_AAPCS_VFP:
1875     if (!isVarArg)
1876       return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1877     // Fall through to soft float variant, variadic functions don't
1878     // use hard floating point ABI.
1879   case CallingConv::ARM_AAPCS:
1880     return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1881   case CallingConv::ARM_APCS:
1882     return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1883   case CallingConv::GHC:
1884     if (Return)
1885       llvm_unreachable("Can't return in GHC call convention");
1886     else
1887       return CC_ARM_APCS_GHC;
1888   }
1889 }
1890 
1891 bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
1892                                   SmallVectorImpl<unsigned> &ArgRegs,
1893                                   SmallVectorImpl<MVT> &ArgVTs,
1894                                   SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
1895                                   SmallVectorImpl<unsigned> &RegArgs,
1896                                   CallingConv::ID CC,
1897                                   unsigned &NumBytes,
1898                                   bool isVarArg) {
1899   SmallVector<CCValAssign, 16> ArgLocs;
1900   CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
1901   CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
1902                              CCAssignFnForCall(CC, false, isVarArg));
1903 
1904   // Check that we can handle all of the arguments. If we can't, then bail out
1905   // now before we add code to the MBB.
1906   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1907     CCValAssign &VA = ArgLocs[i];
1908     MVT ArgVT = ArgVTs[VA.getValNo()];
1909 
1910     // We don't handle NEON/vector parameters yet.
1911     if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
1912       return false;
1913 
1914     // Now copy/store arg to correct locations.
1915     if (VA.isRegLoc() && !VA.needsCustom()) {
1916       continue;
1917     } else if (VA.needsCustom()) {
1918       // TODO: We need custom lowering for vector (v2f64) args.
1919       if (VA.getLocVT() != MVT::f64 ||
1920           // TODO: Only handle register args for now.
1921           !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
1922         return false;
1923     } else {
1924       switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
1925       default:
1926         return false;
1927       case MVT::i1:
1928       case MVT::i8:
1929       case MVT::i16:
1930       case MVT::i32:
1931         break;
1932       case MVT::f32:
1933         if (!Subtarget->hasVFP2())
1934           return false;
1935         break;
1936       case MVT::f64:
1937         if (!Subtarget->hasVFP2())
1938           return false;
1939         break;
1940       }
1941     }
1942   }
1943 
1944   // At the point, we are able to handle the call's arguments in fast isel.
1945 
1946   // Get a count of how many bytes are to be pushed on the stack.
1947   NumBytes = CCInfo.getNextStackOffset();
1948 
1949   // Issue CALLSEQ_START
1950   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
1951   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1952                           TII.get(AdjStackDown))
1953                   .addImm(NumBytes));
1954 
1955   // Process the args.
1956   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1957     CCValAssign &VA = ArgLocs[i];
1958     unsigned Arg = ArgRegs[VA.getValNo()];
1959     MVT ArgVT = ArgVTs[VA.getValNo()];
1960 
1961     assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
1962            "We don't handle NEON/vector parameters yet.");
1963 
1964     // Handle arg promotion, etc.
1965     switch (VA.getLocInfo()) {
1966       case CCValAssign::Full: break;
1967       case CCValAssign::SExt: {
1968         MVT DestVT = VA.getLocVT();
1969         Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
1970         assert (Arg != 0 && "Failed to emit a sext");
1971         ArgVT = DestVT;
1972         break;
1973       }
1974       case CCValAssign::AExt:
1975         // Intentional fall-through.  Handle AExt and ZExt.
1976       case CCValAssign::ZExt: {
1977         MVT DestVT = VA.getLocVT();
1978         Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
1979         assert (Arg != 0 && "Failed to emit a sext");
1980         ArgVT = DestVT;
1981         break;
1982       }
1983       case CCValAssign::BCvt: {
1984         unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
1985                                  /*TODO: Kill=*/false);
1986         assert(BC != 0 && "Failed to emit a bitcast!");
1987         Arg = BC;
1988         ArgVT = VA.getLocVT();
1989         break;
1990       }
1991       default: llvm_unreachable("Unknown arg promotion!");
1992     }
1993 
1994     // Now copy/store arg to correct locations.
1995     if (VA.isRegLoc() && !VA.needsCustom()) {
1996       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1997               VA.getLocReg())
1998         .addReg(Arg);
1999       RegArgs.push_back(VA.getLocReg());
2000     } else if (VA.needsCustom()) {
2001       // TODO: We need custom lowering for vector (v2f64) args.
2002       assert(VA.getLocVT() == MVT::f64 &&
2003              "Custom lowering for v2f64 args not available");
2004 
2005       CCValAssign &NextVA = ArgLocs[++i];
2006 
2007       assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2008              "We only handle register args!");
2009 
2010       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2011                               TII.get(ARM::VMOVRRD), VA.getLocReg())
2012                       .addReg(NextVA.getLocReg(), RegState::Define)
2013                       .addReg(Arg));
2014       RegArgs.push_back(VA.getLocReg());
2015       RegArgs.push_back(NextVA.getLocReg());
2016     } else {
2017       assert(VA.isMemLoc());
2018       // Need to store on the stack.
2019       Address Addr;
2020       Addr.BaseType = Address::RegBase;
2021       Addr.Base.Reg = ARM::SP;
2022       Addr.Offset = VA.getLocMemOffset();
2023 
2024       bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
2025       assert(EmitRet && "Could not emit a store for argument!");
2026     }
2027   }
2028 
2029   return true;
2030 }
2031 
2032 bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
2033                              const Instruction *I, CallingConv::ID CC,
2034                              unsigned &NumBytes, bool isVarArg) {
2035   // Issue CALLSEQ_END
2036   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
2037   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2038                           TII.get(AdjStackUp))
2039                   .addImm(NumBytes).addImm(0));
2040 
2041   // Now the return value.
2042   if (RetVT != MVT::isVoid) {
2043     SmallVector<CCValAssign, 16> RVLocs;
2044     CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
2045     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
2046 
2047     // Copy all of the result registers out of their specified physreg.
2048     if (RVLocs.size() == 2 && RetVT == MVT::f64) {
2049       // For this move we copy into two registers and then move into the
2050       // double fp reg we want.
2051       EVT DestVT = RVLocs[0].getValVT();
2052       const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
2053       unsigned ResultReg = createResultReg(DstRC);
2054       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2055                               TII.get(ARM::VMOVDRR), ResultReg)
2056                       .addReg(RVLocs[0].getLocReg())
2057                       .addReg(RVLocs[1].getLocReg()));
2058 
2059       UsedRegs.push_back(RVLocs[0].getLocReg());
2060       UsedRegs.push_back(RVLocs[1].getLocReg());
2061 
2062       // Finally update the result.
2063       UpdateValueMap(I, ResultReg);
2064     } else {
2065       assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
2066       EVT CopyVT = RVLocs[0].getValVT();
2067 
2068       // Special handling for extended integers.
2069       if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
2070         CopyVT = MVT::i32;
2071 
2072       const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
2073 
2074       unsigned ResultReg = createResultReg(DstRC);
2075       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
2076               ResultReg).addReg(RVLocs[0].getLocReg());
2077       UsedRegs.push_back(RVLocs[0].getLocReg());
2078 
2079       // Finally update the result.
2080       UpdateValueMap(I, ResultReg);
2081     }
2082   }
2083 
2084   return true;
2085 }
2086 
2087 bool ARMFastISel::SelectRet(const Instruction *I) {
2088   const ReturnInst *Ret = cast<ReturnInst>(I);
2089   const Function &F = *I->getParent()->getParent();
2090 
2091   if (!FuncInfo.CanLowerReturn)
2092     return false;
2093 
2094   CallingConv::ID CC = F.getCallingConv();
2095   if (Ret->getNumOperands() > 0) {
2096     SmallVector<ISD::OutputArg, 4> Outs;
2097     GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
2098                   Outs, TLI);
2099 
2100     // Analyze operands of the call, assigning locations to each operand.
2101     SmallVector<CCValAssign, 16> ValLocs;
2102     CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext());
2103     CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
2104                                                  F.isVarArg()));
2105 
2106     const Value *RV = Ret->getOperand(0);
2107     unsigned Reg = getRegForValue(RV);
2108     if (Reg == 0)
2109       return false;
2110 
2111     // Only handle a single return value for now.
2112     if (ValLocs.size() != 1)
2113       return false;
2114 
2115     CCValAssign &VA = ValLocs[0];
2116 
2117     // Don't bother handling odd stuff for now.
2118     if (VA.getLocInfo() != CCValAssign::Full)
2119       return false;
2120     // Only handle register returns for now.
2121     if (!VA.isRegLoc())
2122       return false;
2123 
2124     unsigned SrcReg = Reg + VA.getValNo();
2125     EVT RVVT = TLI.getValueType(RV->getType());
2126     EVT DestVT = VA.getValVT();
2127     // Special handling for extended integers.
2128     if (RVVT != DestVT) {
2129       if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
2130         return false;
2131 
2132       assert(DestVT == MVT::i32 && "ARM should always ext to i32");
2133 
2134       // Perform extension if flagged as either zext or sext.  Otherwise, do
2135       // nothing.
2136       if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
2137         SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
2138         if (SrcReg == 0) return false;
2139       }
2140     }
2141 
2142     // Make the copy.
2143     unsigned DstReg = VA.getLocReg();
2144     const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
2145     // Avoid a cross-class copy. This is very unlikely.
2146     if (!SrcRC->contains(DstReg))
2147       return false;
2148     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
2149             DstReg).addReg(SrcReg);
2150 
2151     // Mark the register as live out of the function.
2152     MRI.addLiveOut(VA.getLocReg());
2153   }
2154 
2155   unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
2156   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2157                           TII.get(RetOpc)));
2158   return true;
2159 }
2160 
2161 unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
2162   if (UseReg)
2163     return isThumb2 ? ARM::tBLXr : ARM::BLX;
2164   else
2165     return isThumb2 ? ARM::tBL : ARM::BL;
2166 }
2167 
2168 unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
2169   GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
2170                                        GlobalValue::ExternalLinkage, 0, Name);
2171   return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
2172 }
2173 
2174 // A quick function that will emit a call for a named libcall in F with the
2175 // vector of passed arguments for the Instruction in I. We can assume that we
2176 // can emit a call for any libcall we can produce. This is an abridged version
2177 // of the full call infrastructure since we won't need to worry about things
2178 // like computed function pointers or strange arguments at call sites.
2179 // TODO: Try to unify this and the normal call bits for ARM, then try to unify
2180 // with X86.
2181 bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
2182   CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
2183 
2184   // Handle *simple* calls for now.
2185   Type *RetTy = I->getType();
2186   MVT RetVT;
2187   if (RetTy->isVoidTy())
2188     RetVT = MVT::isVoid;
2189   else if (!isTypeLegal(RetTy, RetVT))
2190     return false;
2191 
2192   // Can't handle non-double multi-reg retvals.
2193   if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
2194     SmallVector<CCValAssign, 16> RVLocs;
2195     CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
2196     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
2197     if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2198       return false;
2199   }
2200 
2201   // Set up the argument vectors.
2202   SmallVector<Value*, 8> Args;
2203   SmallVector<unsigned, 8> ArgRegs;
2204   SmallVector<MVT, 8> ArgVTs;
2205   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2206   Args.reserve(I->getNumOperands());
2207   ArgRegs.reserve(I->getNumOperands());
2208   ArgVTs.reserve(I->getNumOperands());
2209   ArgFlags.reserve(I->getNumOperands());
2210   for (unsigned i = 0; i < I->getNumOperands(); ++i) {
2211     Value *Op = I->getOperand(i);
2212     unsigned Arg = getRegForValue(Op);
2213     if (Arg == 0) return false;
2214 
2215     Type *ArgTy = Op->getType();
2216     MVT ArgVT;
2217     if (!isTypeLegal(ArgTy, ArgVT)) return false;
2218 
2219     ISD::ArgFlagsTy Flags;
2220     unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
2221     Flags.setOrigAlign(OriginalAlignment);
2222 
2223     Args.push_back(Op);
2224     ArgRegs.push_back(Arg);
2225     ArgVTs.push_back(ArgVT);
2226     ArgFlags.push_back(Flags);
2227   }
2228 
2229   // Handle the arguments now that we've gotten them.
2230   SmallVector<unsigned, 4> RegArgs;
2231   unsigned NumBytes;
2232   if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2233                        RegArgs, CC, NumBytes, false))
2234     return false;
2235 
2236   unsigned CalleeReg = 0;
2237   if (EnableARMLongCalls) {
2238     CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
2239     if (CalleeReg == 0) return false;
2240   }
2241 
2242   // Issue the call.
2243   unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
2244   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2245                                     DL, TII.get(CallOpc));
2246   // BL / BLX don't take a predicate, but tBL / tBLX do.
2247   if (isThumb2)
2248     AddDefaultPred(MIB);
2249   if (EnableARMLongCalls)
2250     MIB.addReg(CalleeReg);
2251   else
2252     MIB.addExternalSymbol(TLI.getLibcallName(Call));
2253 
2254   // Add implicit physical register uses to the call.
2255   for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
2256     MIB.addReg(RegArgs[i], RegState::Implicit);
2257 
2258   // Add a register mask with the call-preserved registers.
2259   // Proper defs for return values will be added by setPhysRegsDeadExcept().
2260   MIB.addRegMask(TRI.getCallPreservedMask(CC));
2261 
2262   // Finish off the call including any return values.
2263   SmallVector<unsigned, 4> UsedRegs;
2264   if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;
2265 
2266   // Set all unused physreg defs as dead.
2267   static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2268 
2269   return true;
2270 }
2271 
2272 bool ARMFastISel::SelectCall(const Instruction *I,
2273                              const char *IntrMemName = 0) {
2274   const CallInst *CI = cast<CallInst>(I);
2275   const Value *Callee = CI->getCalledValue();
2276 
2277   // Can't handle inline asm.
2278   if (isa<InlineAsm>(Callee)) return false;
2279 
2280   // Check the calling convention.
2281   ImmutableCallSite CS(CI);
2282   CallingConv::ID CC = CS.getCallingConv();
2283 
2284   // TODO: Avoid some calling conventions?
2285 
2286   PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
2287   FunctionType *FTy = cast<FunctionType>(PT->getElementType());
2288   bool isVarArg = FTy->isVarArg();
2289 
2290   // Handle *simple* calls for now.
2291   Type *RetTy = I->getType();
2292   MVT RetVT;
2293   if (RetTy->isVoidTy())
2294     RetVT = MVT::isVoid;
2295   else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
2296            RetVT != MVT::i8  && RetVT != MVT::i1)
2297     return false;
2298 
2299   // Can't handle non-double multi-reg retvals.
2300   if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
2301       RetVT != MVT::i16 && RetVT != MVT::i32) {
2302     SmallVector<CCValAssign, 16> RVLocs;
2303     CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
2304     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
2305     if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2306       return false;
2307   }
2308 
2309   // Set up the argument vectors.
2310   SmallVector<Value*, 8> Args;
2311   SmallVector<unsigned, 8> ArgRegs;
2312   SmallVector<MVT, 8> ArgVTs;
2313   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2314   unsigned arg_size = CS.arg_size();
2315   Args.reserve(arg_size);
2316   ArgRegs.reserve(arg_size);
2317   ArgVTs.reserve(arg_size);
2318   ArgFlags.reserve(arg_size);
2319   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
2320        i != e; ++i) {
2321     // If we're lowering a memory intrinsic instead of a regular call, skip the
2322     // last two arguments, which shouldn't be passed to the underlying function.
2323     if (IntrMemName && e-i <= 2)
2324       break;
2325 
2326     ISD::ArgFlagsTy Flags;
2327     unsigned AttrInd = i - CS.arg_begin() + 1;
2328     if (CS.paramHasAttr(AttrInd, Attributes::SExt))
2329       Flags.setSExt();
2330     if (CS.paramHasAttr(AttrInd, Attributes::ZExt))
2331       Flags.setZExt();
2332 
2333     // FIXME: Only handle *easy* calls for now.
2334     if (CS.paramHasAttr(AttrInd, Attributes::InReg) ||
2335         CS.paramHasAttr(AttrInd, Attributes::StructRet) ||
2336         CS.paramHasAttr(AttrInd, Attributes::Nest) ||
2337         CS.paramHasAttr(AttrInd, Attributes::ByVal))
2338       return false;
2339 
2340     Type *ArgTy = (*i)->getType();
2341     MVT ArgVT;
2342     if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
2343         ArgVT != MVT::i1)
2344       return false;
2345 
2346     unsigned Arg = getRegForValue(*i);
2347     if (Arg == 0)
2348       return false;
2349 
2350     unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
2351     Flags.setOrigAlign(OriginalAlignment);
2352 
2353     Args.push_back(*i);
2354     ArgRegs.push_back(Arg);
2355     ArgVTs.push_back(ArgVT);
2356     ArgFlags.push_back(Flags);
2357   }
2358 
2359   // Handle the arguments now that we've gotten them.
2360   SmallVector<unsigned, 4> RegArgs;
2361   unsigned NumBytes;
2362   if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2363                        RegArgs, CC, NumBytes, isVarArg))
2364     return false;
2365 
2366   bool UseReg = false;
2367   const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
2368   if (!GV || EnableARMLongCalls) UseReg = true;
2369 
2370   unsigned CalleeReg = 0;
2371   if (UseReg) {
2372     if (IntrMemName)
2373       CalleeReg = getLibcallReg(IntrMemName);
2374     else
2375       CalleeReg = getRegForValue(Callee);
2376 
2377     if (CalleeReg == 0) return false;
2378   }
2379 
2380   // Issue the call.
2381   unsigned CallOpc = ARMSelectCallOp(UseReg);
2382   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2383                                     DL, TII.get(CallOpc));
2384 
2385   // ARM calls don't take a predicate, but tBL / tBLX do.
2386   if(isThumb2)
2387     AddDefaultPred(MIB);
2388   if (UseReg)
2389     MIB.addReg(CalleeReg);
2390   else if (!IntrMemName)
2391     MIB.addGlobalAddress(GV, 0, 0);
2392   else
2393     MIB.addExternalSymbol(IntrMemName, 0);
2394 
2395   // Add implicit physical register uses to the call.
2396   for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
2397     MIB.addReg(RegArgs[i], RegState::Implicit);
2398 
2399   // Add a register mask with the call-preserved registers.
2400   // Proper defs for return values will be added by setPhysRegsDeadExcept().
2401   MIB.addRegMask(TRI.getCallPreservedMask(CC));
2402 
2403   // Finish off the call including any return values.
2404   SmallVector<unsigned, 4> UsedRegs;
2405   if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
2406     return false;
2407 
2408   // Set all unused physreg defs as dead.
2409   static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2410 
2411   return true;
2412 }
2413 
2414 bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
2415   return Len <= 16;
2416 }
2417 
2418 bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
2419                                         uint64_t Len) {
2420   // Make sure we don't bloat code by inlining very large memcpy's.
2421   if (!ARMIsMemCpySmall(Len))
2422     return false;
2423 
2424   // We don't care about alignment here since we just emit integer accesses.
2425   while (Len) {
2426     MVT VT;
2427     if (Len >= 4)
2428       VT = MVT::i32;
2429     else if (Len >= 2)
2430       VT = MVT::i16;
2431     else {
2432       assert(Len == 1);
2433       VT = MVT::i8;
2434     }
2435 
2436     bool RV;
2437     unsigned ResultReg;
2438     RV = ARMEmitLoad(VT, ResultReg, Src);
2439     assert (RV == true && "Should be able to handle this load.");
2440     RV = ARMEmitStore(VT, ResultReg, Dest);
2441     assert (RV == true && "Should be able to handle this store.");
2442     (void)RV;
2443 
2444     unsigned Size = VT.getSizeInBits()/8;
2445     Len -= Size;
2446     Dest.Offset += Size;
2447     Src.Offset += Size;
2448   }
2449 
2450   return true;
2451 }
2452 
2453 bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
2454   // FIXME: Handle more intrinsics.
2455   switch (I.getIntrinsicID()) {
2456   default: return false;
2457   case Intrinsic::frameaddress: {
2458     MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
2459     MFI->setFrameAddressIsTaken(true);
2460 
2461     unsigned LdrOpc;
2462     const TargetRegisterClass *RC;
2463     if (isThumb2) {
2464       LdrOpc =  ARM::t2LDRi12;
2465       RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
2466     } else {
2467       LdrOpc =  ARM::LDRi12;
2468       RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
2469     }
2470 
2471     const ARMBaseRegisterInfo *RegInfo =
2472           static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
2473     unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
2474     unsigned SrcReg = FramePtr;
2475 
2476     // Recursively load frame address
2477     // ldr r0 [fp]
2478     // ldr r0 [r0]
2479     // ldr r0 [r0]
2480     // ...
2481     unsigned DestReg;
2482     unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
2483     while (Depth--) {
2484       DestReg = createResultReg(RC);
2485       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2486                               TII.get(LdrOpc), DestReg)
2487                       .addReg(SrcReg).addImm(0));
2488       SrcReg = DestReg;
2489     }
2490     UpdateValueMap(&I, SrcReg);
2491     return true;
2492   }
2493   case Intrinsic::memcpy:
2494   case Intrinsic::memmove: {
2495     const MemTransferInst &MTI = cast<MemTransferInst>(I);
2496     // Don't handle volatile.
2497     if (MTI.isVolatile())
2498       return false;
2499 
2500     // Disable inlining for memmove before calls to ComputeAddress.  Otherwise,
2501     // we would emit dead code because we don't currently handle memmoves.
2502     bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
2503     if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
2504       // Small memcpy's are common enough that we want to do them without a call
2505       // if possible.
2506       uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
2507       if (ARMIsMemCpySmall(Len)) {
2508         Address Dest, Src;
2509         if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
2510             !ARMComputeAddress(MTI.getRawSource(), Src))
2511           return false;
2512         if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
2513           return true;
2514       }
2515     }
2516 
2517     if (!MTI.getLength()->getType()->isIntegerTy(32))
2518       return false;
2519 
2520     if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
2521       return false;
2522 
2523     const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
2524     return SelectCall(&I, IntrMemName);
2525   }
2526   case Intrinsic::memset: {
2527     const MemSetInst &MSI = cast<MemSetInst>(I);
2528     // Don't handle volatile.
2529     if (MSI.isVolatile())
2530       return false;
2531 
2532     if (!MSI.getLength()->getType()->isIntegerTy(32))
2533       return false;
2534 
2535     if (MSI.getDestAddressSpace() > 255)
2536       return false;
2537 
2538     return SelectCall(&I, "memset");
2539   }
2540   case Intrinsic::trap: {
2541     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::TRAP));
2542     return true;
2543   }
2544   }
2545 }
2546 
2547 bool ARMFastISel::SelectTrunc(const Instruction *I) {
2548   // The high bits for a type smaller than the register size are assumed to be
2549   // undefined.
2550   Value *Op = I->getOperand(0);
2551 
2552   EVT SrcVT, DestVT;
2553   SrcVT = TLI.getValueType(Op->getType(), true);
2554   DestVT = TLI.getValueType(I->getType(), true);
2555 
2556   if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
2557     return false;
2558   if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
2559     return false;
2560 
2561   unsigned SrcReg = getRegForValue(Op);
2562   if (!SrcReg) return false;
2563 
2564   // Because the high bits are undefined, a truncate doesn't generate
2565   // any code.
2566   UpdateValueMap(I, SrcReg);
2567   return true;
2568 }
2569 
2570 unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
2571                                     bool isZExt) {
2572   if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
2573     return 0;
2574 
2575   unsigned Opc;
2576   bool isBoolZext = false;
2577   if (!SrcVT.isSimple()) return 0;
2578   switch (SrcVT.getSimpleVT().SimpleTy) {
2579   default: return 0;
2580   case MVT::i16:
2581     if (!Subtarget->hasV6Ops()) return 0;
2582     if (isZExt)
2583       Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
2584     else
2585       Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
2586     break;
2587   case MVT::i8:
2588     if (!Subtarget->hasV6Ops()) return 0;
2589     if (isZExt)
2590       Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
2591     else
2592       Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
2593     break;
2594   case MVT::i1:
2595     if (isZExt) {
2596       Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
2597       isBoolZext = true;
2598       break;
2599     }
2600     return 0;
2601   }
2602 
2603   unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
2604   MachineInstrBuilder MIB;
2605   MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
2606         .addReg(SrcReg);
2607   if (isBoolZext)
2608     MIB.addImm(1);
2609   else
2610     MIB.addImm(0);
2611   AddOptionalDefs(MIB);
2612   return ResultReg;
2613 }
2614 
2615 bool ARMFastISel::SelectIntExt(const Instruction *I) {
2616   // On ARM, in general, integer casts don't involve legal types; this code
2617   // handles promotable integers.
2618   Type *DestTy = I->getType();
2619   Value *Src = I->getOperand(0);
2620   Type *SrcTy = Src->getType();
2621 
2622   EVT SrcVT, DestVT;
2623   SrcVT = TLI.getValueType(SrcTy, true);
2624   DestVT = TLI.getValueType(DestTy, true);
2625 
2626   bool isZExt = isa<ZExtInst>(I);
2627   unsigned SrcReg = getRegForValue(Src);
2628   if (!SrcReg) return false;
2629 
2630   unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2631   if (ResultReg == 0) return false;
2632   UpdateValueMap(I, ResultReg);
2633   return true;
2634 }
2635 
2636 bool ARMFastISel::SelectShift(const Instruction *I,
2637                               ARM_AM::ShiftOpc ShiftTy) {
2638   // We handle thumb2 mode by target independent selector
2639   // or SelectionDAG ISel.
2640   if (isThumb2)
2641     return false;
2642 
2643   // Only handle i32 now.
2644   EVT DestVT = TLI.getValueType(I->getType(), true);
2645   if (DestVT != MVT::i32)
2646     return false;
2647 
2648   unsigned Opc = ARM::MOVsr;
2649   unsigned ShiftImm;
2650   Value *Src2Value = I->getOperand(1);
2651   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
2652     ShiftImm = CI->getZExtValue();
2653 
2654     // Fall back to selection DAG isel if the shift amount
2655     // is zero or greater than the width of the value type.
2656     if (ShiftImm == 0 || ShiftImm >=32)
2657       return false;
2658 
2659     Opc = ARM::MOVsi;
2660   }
2661 
2662   Value *Src1Value = I->getOperand(0);
2663   unsigned Reg1 = getRegForValue(Src1Value);
2664   if (Reg1 == 0) return false;
2665 
2666   unsigned Reg2 = 0;
2667   if (Opc == ARM::MOVsr) {
2668     Reg2 = getRegForValue(Src2Value);
2669     if (Reg2 == 0) return false;
2670   }
2671 
2672   unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
2673   if(ResultReg == 0) return false;
2674 
2675   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2676                                     TII.get(Opc), ResultReg)
2677                             .addReg(Reg1);
2678 
2679   if (Opc == ARM::MOVsi)
2680     MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
2681   else if (Opc == ARM::MOVsr) {
2682     MIB.addReg(Reg2);
2683     MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
2684   }
2685 
2686   AddOptionalDefs(MIB);
2687   UpdateValueMap(I, ResultReg);
2688   return true;
2689 }
2690 
2691 // TODO: SoftFP support.
2692 bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
2693 
2694   switch (I->getOpcode()) {
2695     case Instruction::Load:
2696       return SelectLoad(I);
2697     case Instruction::Store:
2698       return SelectStore(I);
2699     case Instruction::Br:
2700       return SelectBranch(I);
2701     case Instruction::IndirectBr:
2702       return SelectIndirectBr(I);
2703     case Instruction::ICmp:
2704     case Instruction::FCmp:
2705       return SelectCmp(I);
2706     case Instruction::FPExt:
2707       return SelectFPExt(I);
2708     case Instruction::FPTrunc:
2709       return SelectFPTrunc(I);
2710     case Instruction::SIToFP:
2711       return SelectIToFP(I, /*isSigned*/ true);
2712     case Instruction::UIToFP:
2713       return SelectIToFP(I, /*isSigned*/ false);
2714     case Instruction::FPToSI:
2715       return SelectFPToI(I, /*isSigned*/ true);
2716     case Instruction::FPToUI:
2717       return SelectFPToI(I, /*isSigned*/ false);
2718     case Instruction::Add:
2719       return SelectBinaryIntOp(I, ISD::ADD);
2720     case Instruction::Or:
2721       return SelectBinaryIntOp(I, ISD::OR);
2722     case Instruction::Sub:
2723       return SelectBinaryIntOp(I, ISD::SUB);
2724     case Instruction::FAdd:
2725       return SelectBinaryFPOp(I, ISD::FADD);
2726     case Instruction::FSub:
2727       return SelectBinaryFPOp(I, ISD::FSUB);
2728     case Instruction::FMul:
2729       return SelectBinaryFPOp(I, ISD::FMUL);
2730     case Instruction::SDiv:
2731       return SelectDiv(I, /*isSigned*/ true);
2732     case Instruction::UDiv:
2733       return SelectDiv(I, /*isSigned*/ false);
2734     case Instruction::SRem:
2735       return SelectRem(I, /*isSigned*/ true);
2736     case Instruction::URem:
2737       return SelectRem(I, /*isSigned*/ false);
2738     case Instruction::Call:
2739       if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2740         return SelectIntrinsicCall(*II);
2741       return SelectCall(I);
2742     case Instruction::Select:
2743       return SelectSelect(I);
2744     case Instruction::Ret:
2745       return SelectRet(I);
2746     case Instruction::Trunc:
2747       return SelectTrunc(I);
2748     case Instruction::ZExt:
2749     case Instruction::SExt:
2750       return SelectIntExt(I);
2751     case Instruction::Shl:
2752       return SelectShift(I, ARM_AM::lsl);
2753     case Instruction::LShr:
2754       return SelectShift(I, ARM_AM::lsr);
2755     case Instruction::AShr:
2756       return SelectShift(I, ARM_AM::asr);
2757     default: break;
2758   }
2759   return false;
2760 }
2761 
2762 /// TryToFoldLoad - The specified machine instr operand is a vreg, and that
2763 /// vreg is being provided by the specified load instruction.  If possible,
2764 /// try to fold the load as an operand to the instruction, returning true if
2765 /// successful.
2766 bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
2767                                 const LoadInst *LI) {
2768   // Verify we have a legal type before going any further.
2769   MVT VT;
2770   if (!isLoadTypeLegal(LI->getType(), VT))
2771     return false;
2772 
2773   // Combine load followed by zero- or sign-extend.
2774   // ldrb r1, [r0]       ldrb r1, [r0]
2775   // uxtb r2, r1     =>
2776   // mov  r3, r2         mov  r3, r1
2777   bool isZExt = true;
2778   switch(MI->getOpcode()) {
2779     default: return false;
2780     case ARM::SXTH:
2781     case ARM::t2SXTH:
2782       isZExt = false;
2783     case ARM::UXTH:
2784     case ARM::t2UXTH:
2785       if (VT != MVT::i16)
2786         return false;
2787     break;
2788     case ARM::SXTB:
2789     case ARM::t2SXTB:
2790       isZExt = false;
2791     case ARM::UXTB:
2792     case ARM::t2UXTB:
2793       if (VT != MVT::i8)
2794         return false;
2795     break;
2796   }
2797   // See if we can handle this address.
2798   Address Addr;
2799   if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
2800 
2801   unsigned ResultReg = MI->getOperand(0).getReg();
2802   if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
2803     return false;
2804   MI->eraseFromParent();
2805   return true;
2806 }
2807 
2808 unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
2809                                      unsigned Align, EVT VT) {
2810   bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
2811   ARMConstantPoolConstant *CPV =
2812     ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
2813   unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
2814 
2815   unsigned Opc;
2816   unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT));
2817   // Load value.
2818   if (isThumb2) {
2819     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2820                             TII.get(ARM::t2LDRpci), DestReg1)
2821                     .addConstantPoolIndex(Idx));
2822     Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs;
2823   } else {
2824     // The extra immediate is for addrmode2.
2825     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2826                             DL, TII.get(ARM::LDRcp), DestReg1)
2827                     .addConstantPoolIndex(Idx).addImm(0));
2828     Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs;
2829   }
2830 
2831   unsigned GlobalBaseReg = AFI->getGlobalBaseReg();
2832   if (GlobalBaseReg == 0) {
2833     GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT));
2834     AFI->setGlobalBaseReg(GlobalBaseReg);
2835   }
2836 
2837   unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT));
2838   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2839                                     DL, TII.get(Opc), DestReg2)
2840                             .addReg(DestReg1)
2841                             .addReg(GlobalBaseReg);
2842   if (!UseGOTOFF)
2843     MIB.addImm(0);
2844   AddOptionalDefs(MIB);
2845 
2846   return DestReg2;
2847 }
2848 
2849 namespace llvm {
2850   FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
2851                                 const TargetLibraryInfo *libInfo) {
2852     // Completely untested on non-iOS.
2853     const TargetMachine &TM = funcInfo.MF->getTarget();
2854 
2855     // Darwin and thumb1 only for now.
2856     const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
2857     if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only())
2858       return new ARMFastISel(funcInfo, libInfo);
2859     return 0;
2860   }
2861 }
2862