1 //===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the ARM-specific support for the FastISel class. Some
11 // of the target-specific code is generated by tablegen in the file
12 // ARMGenFastISel.inc, which is #included here.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "ARM.h"
17 #include "ARMBaseInstrInfo.h"
18 #include "ARMCallingConv.h"
19 #include "ARMRegisterInfo.h"
20 #include "ARMTargetMachine.h"
21 #include "ARMSubtarget.h"
22 #include "ARMConstantPoolValue.h"
23 #include "MCTargetDesc/ARMAddressingModes.h"
24 #include "llvm/CallingConv.h"
25 #include "llvm/DerivedTypes.h"
26 #include "llvm/GlobalVariable.h"
27 #include "llvm/Instructions.h"
28 #include "llvm/IntrinsicInst.h"
29 #include "llvm/Module.h"
30 #include "llvm/Operator.h"
31 #include "llvm/CodeGen/Analysis.h"
32 #include "llvm/CodeGen/FastISel.h"
33 #include "llvm/CodeGen/FunctionLoweringInfo.h"
34 #include "llvm/CodeGen/MachineInstrBuilder.h"
35 #include "llvm/CodeGen/MachineModuleInfo.h"
36 #include "llvm/CodeGen/MachineConstantPool.h"
37 #include "llvm/CodeGen/MachineFrameInfo.h"
38 #include "llvm/CodeGen/MachineMemOperand.h"
39 #include "llvm/CodeGen/MachineRegisterInfo.h"
40 #include "llvm/Support/CallSite.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/GetElementPtrTypeIterator.h"
44 #include "llvm/Target/TargetData.h"
45 #include "llvm/Target/TargetInstrInfo.h"
46 #include "llvm/Target/TargetLowering.h"
47 #include "llvm/Target/TargetMachine.h"
48 #include "llvm/Target/TargetOptions.h"
49 using namespace llvm;
50 
51 static cl::opt<bool>
52 DisableARMFastISel("disable-arm-fast-isel",
53                     cl::desc("Turn off experimental ARM fast-isel support"),
54                     cl::init(false), cl::Hidden);
55 
56 extern cl::opt<bool> EnableARMLongCalls;
57 
58 namespace {
59 
60   // All possible address modes, plus some.
61   typedef struct Address {
62     enum {
63       RegBase,
64       FrameIndexBase
65     } BaseType;
66 
67     union {
68       unsigned Reg;
69       int FI;
70     } Base;
71 
72     int Offset;
73 
74     // Innocuous defaults for our address.
75     Address()
76      : BaseType(RegBase), Offset(0) {
77        Base.Reg = 0;
78      }
79   } Address;
80 
81 class ARMFastISel : public FastISel {
82 
83   /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
84   /// make the right decision when generating code for different targets.
85   const ARMSubtarget *Subtarget;
86   const TargetMachine &TM;
87   const TargetInstrInfo &TII;
88   const TargetLowering &TLI;
89   ARMFunctionInfo *AFI;
90 
91   // Convenience variables to avoid some queries.
92   bool isThumb2;
93   LLVMContext *Context;
94 
95   public:
96     explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
97     : FastISel(funcInfo),
98       TM(funcInfo.MF->getTarget()),
99       TII(*TM.getInstrInfo()),
100       TLI(*TM.getTargetLowering()) {
101       Subtarget = &TM.getSubtarget<ARMSubtarget>();
102       AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
103       isThumb2 = AFI->isThumbFunction();
104       Context = &funcInfo.Fn->getContext();
105     }
106 
107     // Code from FastISel.cpp.
108     virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
109                                    const TargetRegisterClass *RC);
110     virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
111                                     const TargetRegisterClass *RC,
112                                     unsigned Op0, bool Op0IsKill);
113     virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
114                                      const TargetRegisterClass *RC,
115                                      unsigned Op0, bool Op0IsKill,
116                                      unsigned Op1, bool Op1IsKill);
117     virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
118                                       const TargetRegisterClass *RC,
119                                       unsigned Op0, bool Op0IsKill,
120                                       unsigned Op1, bool Op1IsKill,
121                                       unsigned Op2, bool Op2IsKill);
122     virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
123                                      const TargetRegisterClass *RC,
124                                      unsigned Op0, bool Op0IsKill,
125                                      uint64_t Imm);
126     virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
127                                      const TargetRegisterClass *RC,
128                                      unsigned Op0, bool Op0IsKill,
129                                      const ConstantFP *FPImm);
130     virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
131                                       const TargetRegisterClass *RC,
132                                       unsigned Op0, bool Op0IsKill,
133                                       unsigned Op1, bool Op1IsKill,
134                                       uint64_t Imm);
135     virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
136                                     const TargetRegisterClass *RC,
137                                     uint64_t Imm);
138     virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
139                                      const TargetRegisterClass *RC,
140                                      uint64_t Imm1, uint64_t Imm2);
141 
142     virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
143                                                 unsigned Op0, bool Op0IsKill,
144                                                 uint32_t Idx);
145 
146     // Backend specific FastISel code.
147     virtual bool TargetSelectInstruction(const Instruction *I);
148     virtual unsigned TargetMaterializeConstant(const Constant *C);
149     virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
150     virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
151                                const LoadInst *LI);
152 
153   #include "ARMGenFastISel.inc"
154 
155     // Instruction selection routines.
156   private:
157     bool SelectLoad(const Instruction *I);
158     bool SelectStore(const Instruction *I);
159     bool SelectBranch(const Instruction *I);
160     bool SelectCmp(const Instruction *I);
161     bool SelectFPExt(const Instruction *I);
162     bool SelectFPTrunc(const Instruction *I);
163     bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
164     bool SelectSIToFP(const Instruction *I);
165     bool SelectFPToSI(const Instruction *I);
166     bool SelectSDiv(const Instruction *I);
167     bool SelectSRem(const Instruction *I);
168     bool SelectCall(const Instruction *I, const char *IntrMemName);
169     bool SelectIntrinsicCall(const IntrinsicInst &I);
170     bool SelectSelect(const Instruction *I);
171     bool SelectRet(const Instruction *I);
172     bool SelectTrunc(const Instruction *I);
173     bool SelectIntExt(const Instruction *I);
174 
175     // Utility routines.
176   private:
177     bool isTypeLegal(Type *Ty, MVT &VT);
178     bool isLoadTypeLegal(Type *Ty, MVT &VT);
179     bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
180                     bool isZExt);
181     bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, bool isZExt,
182                      bool allocReg);
183 
184     bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
185     bool ARMComputeAddress(const Value *Obj, Address &Addr);
186     void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
187     bool ARMIsMemCpySmall(uint64_t Len);
188     bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
189     unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
190     unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
191     unsigned ARMMaterializeInt(const Constant *C, EVT VT);
192     unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
193     unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
194     unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
195     unsigned ARMSelectCallOp(const GlobalValue *GV);
196 
197     // Call handling routines.
198   private:
199     CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
200     bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
201                          SmallVectorImpl<unsigned> &ArgRegs,
202                          SmallVectorImpl<MVT> &ArgVTs,
203                          SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
204                          SmallVectorImpl<unsigned> &RegArgs,
205                          CallingConv::ID CC,
206                          unsigned &NumBytes);
207     bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
208                     const Instruction *I, CallingConv::ID CC,
209                     unsigned &NumBytes);
210     bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);
211 
212     // OptionalDef handling routines.
213   private:
214     bool isARMNEONPred(const MachineInstr *MI);
215     bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
216     const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
217     void AddLoadStoreOperands(EVT VT, Address &Addr,
218                               const MachineInstrBuilder &MIB,
219                               unsigned Flags, bool useAM3);
220 };
221 
222 } // end anonymous namespace
223 
224 #include "ARMGenCallingConv.inc"
225 
226 // DefinesOptionalPredicate - This is different from DefinesPredicate in that
227 // we don't care about implicit defs here, just places we'll need to add a
228 // default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
229 bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
230   const MCInstrDesc &MCID = MI->getDesc();
231   if (!MCID.hasOptionalDef())
232     return false;
233 
234   // Look to see if our OptionalDef is defining CPSR or CCR.
235   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
236     const MachineOperand &MO = MI->getOperand(i);
237     if (!MO.isReg() || !MO.isDef()) continue;
238     if (MO.getReg() == ARM::CPSR)
239       *CPSR = true;
240   }
241   return true;
242 }
243 
244 bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
245   const MCInstrDesc &MCID = MI->getDesc();
246 
247   // If we're a thumb2 or not NEON function we were handled via isPredicable.
248   if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
249        AFI->isThumb2Function())
250     return false;
251 
252   for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
253     if (MCID.OpInfo[i].isPredicate())
254       return true;
255 
256   return false;
257 }
258 
259 // If the machine is predicable go ahead and add the predicate operands, if
260 // it needs default CC operands add those.
261 // TODO: If we want to support thumb1 then we'll need to deal with optional
262 // CPSR defs that need to be added before the remaining operands. See s_cc_out
263 // for descriptions why.
264 const MachineInstrBuilder &
265 ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
266   MachineInstr *MI = &*MIB;
267 
268   // Do we use a predicate? or...
269   // Are we NEON in ARM mode and have a predicate operand? If so, I know
270   // we're not predicable but add it anyways.
271   if (TII.isPredicable(MI) || isARMNEONPred(MI))
272     AddDefaultPred(MIB);
273 
274   // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
275   // defines CPSR. All other OptionalDefines in ARM are the CCR register.
276   bool CPSR = false;
277   if (DefinesOptionalPredicate(MI, &CPSR)) {
278     if (CPSR)
279       AddDefaultT1CC(MIB);
280     else
281       AddDefaultCC(MIB);
282   }
283   return MIB;
284 }
285 
286 unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
287                                     const TargetRegisterClass* RC) {
288   unsigned ResultReg = createResultReg(RC);
289   const MCInstrDesc &II = TII.get(MachineInstOpcode);
290 
291   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
292   return ResultReg;
293 }
294 
295 unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
296                                      const TargetRegisterClass *RC,
297                                      unsigned Op0, bool Op0IsKill) {
298   unsigned ResultReg = createResultReg(RC);
299   const MCInstrDesc &II = TII.get(MachineInstOpcode);
300 
301   if (II.getNumDefs() >= 1)
302     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
303                    .addReg(Op0, Op0IsKill * RegState::Kill));
304   else {
305     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
306                    .addReg(Op0, Op0IsKill * RegState::Kill));
307     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
308                    TII.get(TargetOpcode::COPY), ResultReg)
309                    .addReg(II.ImplicitDefs[0]));
310   }
311   return ResultReg;
312 }
313 
314 unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
315                                       const TargetRegisterClass *RC,
316                                       unsigned Op0, bool Op0IsKill,
317                                       unsigned Op1, bool Op1IsKill) {
318   unsigned ResultReg = createResultReg(RC);
319   const MCInstrDesc &II = TII.get(MachineInstOpcode);
320 
321   if (II.getNumDefs() >= 1)
322     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
323                    .addReg(Op0, Op0IsKill * RegState::Kill)
324                    .addReg(Op1, Op1IsKill * RegState::Kill));
325   else {
326     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
327                    .addReg(Op0, Op0IsKill * RegState::Kill)
328                    .addReg(Op1, Op1IsKill * RegState::Kill));
329     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
330                            TII.get(TargetOpcode::COPY), ResultReg)
331                    .addReg(II.ImplicitDefs[0]));
332   }
333   return ResultReg;
334 }
335 
336 unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
337                                        const TargetRegisterClass *RC,
338                                        unsigned Op0, bool Op0IsKill,
339                                        unsigned Op1, bool Op1IsKill,
340                                        unsigned Op2, bool Op2IsKill) {
341   unsigned ResultReg = createResultReg(RC);
342   const MCInstrDesc &II = TII.get(MachineInstOpcode);
343 
344   if (II.getNumDefs() >= 1)
345     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
346                    .addReg(Op0, Op0IsKill * RegState::Kill)
347                    .addReg(Op1, Op1IsKill * RegState::Kill)
348                    .addReg(Op2, Op2IsKill * RegState::Kill));
349   else {
350     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
351                    .addReg(Op0, Op0IsKill * RegState::Kill)
352                    .addReg(Op1, Op1IsKill * RegState::Kill)
353                    .addReg(Op2, Op2IsKill * RegState::Kill));
354     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
355                            TII.get(TargetOpcode::COPY), ResultReg)
356                    .addReg(II.ImplicitDefs[0]));
357   }
358   return ResultReg;
359 }
360 
361 unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
362                                       const TargetRegisterClass *RC,
363                                       unsigned Op0, bool Op0IsKill,
364                                       uint64_t Imm) {
365   unsigned ResultReg = createResultReg(RC);
366   const MCInstrDesc &II = TII.get(MachineInstOpcode);
367 
368   if (II.getNumDefs() >= 1)
369     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
370                    .addReg(Op0, Op0IsKill * RegState::Kill)
371                    .addImm(Imm));
372   else {
373     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
374                    .addReg(Op0, Op0IsKill * RegState::Kill)
375                    .addImm(Imm));
376     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
377                            TII.get(TargetOpcode::COPY), ResultReg)
378                    .addReg(II.ImplicitDefs[0]));
379   }
380   return ResultReg;
381 }
382 
383 unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
384                                       const TargetRegisterClass *RC,
385                                       unsigned Op0, bool Op0IsKill,
386                                       const ConstantFP *FPImm) {
387   unsigned ResultReg = createResultReg(RC);
388   const MCInstrDesc &II = TII.get(MachineInstOpcode);
389 
390   if (II.getNumDefs() >= 1)
391     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
392                    .addReg(Op0, Op0IsKill * RegState::Kill)
393                    .addFPImm(FPImm));
394   else {
395     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
396                    .addReg(Op0, Op0IsKill * RegState::Kill)
397                    .addFPImm(FPImm));
398     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
399                            TII.get(TargetOpcode::COPY), ResultReg)
400                    .addReg(II.ImplicitDefs[0]));
401   }
402   return ResultReg;
403 }
404 
405 unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
406                                        const TargetRegisterClass *RC,
407                                        unsigned Op0, bool Op0IsKill,
408                                        unsigned Op1, bool Op1IsKill,
409                                        uint64_t Imm) {
410   unsigned ResultReg = createResultReg(RC);
411   const MCInstrDesc &II = TII.get(MachineInstOpcode);
412 
413   if (II.getNumDefs() >= 1)
414     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
415                    .addReg(Op0, Op0IsKill * RegState::Kill)
416                    .addReg(Op1, Op1IsKill * RegState::Kill)
417                    .addImm(Imm));
418   else {
419     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
420                    .addReg(Op0, Op0IsKill * RegState::Kill)
421                    .addReg(Op1, Op1IsKill * RegState::Kill)
422                    .addImm(Imm));
423     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
424                            TII.get(TargetOpcode::COPY), ResultReg)
425                    .addReg(II.ImplicitDefs[0]));
426   }
427   return ResultReg;
428 }
429 
430 unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
431                                      const TargetRegisterClass *RC,
432                                      uint64_t Imm) {
433   unsigned ResultReg = createResultReg(RC);
434   const MCInstrDesc &II = TII.get(MachineInstOpcode);
435 
436   if (II.getNumDefs() >= 1)
437     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
438                    .addImm(Imm));
439   else {
440     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
441                    .addImm(Imm));
442     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
443                            TII.get(TargetOpcode::COPY), ResultReg)
444                    .addReg(II.ImplicitDefs[0]));
445   }
446   return ResultReg;
447 }
448 
449 unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
450                                       const TargetRegisterClass *RC,
451                                       uint64_t Imm1, uint64_t Imm2) {
452   unsigned ResultReg = createResultReg(RC);
453   const MCInstrDesc &II = TII.get(MachineInstOpcode);
454 
455   if (II.getNumDefs() >= 1)
456     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
457                     .addImm(Imm1).addImm(Imm2));
458   else {
459     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
460                     .addImm(Imm1).addImm(Imm2));
461     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
462                             TII.get(TargetOpcode::COPY),
463                             ResultReg)
464                     .addReg(II.ImplicitDefs[0]));
465   }
466   return ResultReg;
467 }
468 
469 unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
470                                                  unsigned Op0, bool Op0IsKill,
471                                                  uint32_t Idx) {
472   unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
473   assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
474          "Cannot yet extract from physregs");
475   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
476                          DL, TII.get(TargetOpcode::COPY), ResultReg)
477                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
478   return ResultReg;
479 }
480 
481 // TODO: Don't worry about 64-bit now, but when this is fixed remove the
482 // checks from the various callers.
483 unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
484   if (VT == MVT::f64) return 0;
485 
486   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
487   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
488                           TII.get(ARM::VMOVRS), MoveReg)
489                   .addReg(SrcReg));
490   return MoveReg;
491 }
492 
493 unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
494   if (VT == MVT::i64) return 0;
495 
496   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
497   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
498                           TII.get(ARM::VMOVSR), MoveReg)
499                   .addReg(SrcReg));
500   return MoveReg;
501 }
502 
503 // For double width floating point we need to materialize two constants
504 // (the high and the low) into integer registers then use a move to get
505 // the combined constant into an FP reg.
506 unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
507   const APFloat Val = CFP->getValueAPF();
508   bool is64bit = VT == MVT::f64;
509 
510   // This checks to see if we can use VFP3 instructions to materialize
511   // a constant, otherwise we have to go through the constant pool.
512   if (TLI.isFPImmLegal(Val, VT)) {
513     int Imm;
514     unsigned Opc;
515     if (is64bit) {
516       Imm = ARM_AM::getFP64Imm(Val);
517       Opc = ARM::FCONSTD;
518     } else {
519       Imm = ARM_AM::getFP32Imm(Val);
520       Opc = ARM::FCONSTS;
521     }
522     unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
523     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
524                             DestReg)
525                     .addImm(Imm));
526     return DestReg;
527   }
528 
529   // Require VFP2 for loading fp constants.
530   if (!Subtarget->hasVFP2()) return false;
531 
532   // MachineConstantPool wants an explicit alignment.
533   unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
534   if (Align == 0) {
535     // TODO: Figure out if this is correct.
536     Align = TD.getTypeAllocSize(CFP->getType());
537   }
538   unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
539   unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
540   unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
541 
542   // The extra reg is for addrmode5.
543   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
544                           DestReg)
545                   .addConstantPoolIndex(Idx)
546                   .addReg(0));
547   return DestReg;
548 }
549 
550 unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
551 
552   if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
553     return false;
554 
555   // If we can do this in a single instruction without a constant pool entry
556   // do so now.
557   const ConstantInt *CI = cast<ConstantInt>(C);
558   if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
559     unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
560     unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
561     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
562                             TII.get(Opc), ImmReg)
563                     .addImm(CI->getZExtValue()));
564     return ImmReg;
565   }
566 
567   // Use MVN to emit negative constants.
568   if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
569     unsigned Imm = (unsigned)~(CI->getSExtValue());
570     bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
571       (ARM_AM::getSOImmVal(Imm) != -1);
572     if (UseImm) {
573       unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
574       unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
575       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
576                               TII.get(Opc), ImmReg)
577                       .addImm(Imm));
578       return ImmReg;
579     }
580   }
581 
582   // Load from constant pool.  For now 32-bit only.
583   if (VT != MVT::i32)
584     return false;
585 
586   unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
587 
588   // MachineConstantPool wants an explicit alignment.
589   unsigned Align = TD.getPrefTypeAlignment(C->getType());
590   if (Align == 0) {
591     // TODO: Figure out if this is correct.
592     Align = TD.getTypeAllocSize(C->getType());
593   }
594   unsigned Idx = MCP.getConstantPoolIndex(C, Align);
595 
596   if (isThumb2)
597     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
598                             TII.get(ARM::t2LDRpci), DestReg)
599                     .addConstantPoolIndex(Idx));
600   else
601     // The extra immediate is for addrmode2.
602     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
603                             TII.get(ARM::LDRcp), DestReg)
604                     .addConstantPoolIndex(Idx)
605                     .addImm(0));
606 
607   return DestReg;
608 }
609 
610 unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
611   // For now 32-bit only.
612   if (VT != MVT::i32) return 0;
613 
614   Reloc::Model RelocM = TM.getRelocationModel();
615 
616   // TODO: Need more magic for ARM PIC.
617   if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;
618 
619   // MachineConstantPool wants an explicit alignment.
620   unsigned Align = TD.getPrefTypeAlignment(GV->getType());
621   if (Align == 0) {
622     // TODO: Figure out if this is correct.
623     Align = TD.getTypeAllocSize(GV->getType());
624   }
625 
626   // Grab index.
627   unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
628   unsigned Id = AFI->createPICLabelUId();
629   ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
630                                                               ARMCP::CPValue,
631                                                               PCAdj);
632   unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
633 
634   // Load value.
635   MachineInstrBuilder MIB;
636   unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
637   if (isThumb2) {
638     unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
639     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
640           .addConstantPoolIndex(Idx);
641     if (RelocM == Reloc::PIC_)
642       MIB.addImm(Id);
643   } else {
644     // The extra immediate is for addrmode2.
645     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
646                   DestReg)
647           .addConstantPoolIndex(Idx)
648           .addImm(0);
649   }
650   AddOptionalDefs(MIB);
651 
652   if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
653     unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
654     if (isThumb2)
655       MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
656                     TII.get(ARM::t2LDRi12), NewDestReg)
657             .addReg(DestReg)
658             .addImm(0);
659     else
660       MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
661                     NewDestReg)
662             .addReg(DestReg)
663             .addImm(0);
664     DestReg = NewDestReg;
665     AddOptionalDefs(MIB);
666   }
667 
668   return DestReg;
669 }
670 
671 unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
672   EVT VT = TLI.getValueType(C->getType(), true);
673 
674   // Only handle simple types.
675   if (!VT.isSimple()) return 0;
676 
677   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
678     return ARMMaterializeFP(CFP, VT);
679   else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
680     return ARMMaterializeGV(GV, VT);
681   else if (isa<ConstantInt>(C))
682     return ARMMaterializeInt(C, VT);
683 
684   return 0;
685 }
686 
687 // TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);
688 
689 unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
690   // Don't handle dynamic allocas.
691   if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
692 
693   MVT VT;
694   if (!isLoadTypeLegal(AI->getType(), VT)) return false;
695 
696   DenseMap<const AllocaInst*, int>::iterator SI =
697     FuncInfo.StaticAllocaMap.find(AI);
698 
699   // This will get lowered later into the correct offsets and registers
700   // via rewriteXFrameIndex.
701   if (SI != FuncInfo.StaticAllocaMap.end()) {
702     TargetRegisterClass* RC = TLI.getRegClassFor(VT);
703     unsigned ResultReg = createResultReg(RC);
704     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
705     AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
706                             TII.get(Opc), ResultReg)
707                             .addFrameIndex(SI->second)
708                             .addImm(0));
709     return ResultReg;
710   }
711 
712   return 0;
713 }
714 
715 bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
716   EVT evt = TLI.getValueType(Ty, true);
717 
718   // Only handle simple types.
719   if (evt == MVT::Other || !evt.isSimple()) return false;
720   VT = evt.getSimpleVT();
721 
722   // Handle all legal types, i.e. a register that will directly hold this
723   // value.
724   return TLI.isTypeLegal(VT);
725 }
726 
727 bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
728   if (isTypeLegal(Ty, VT)) return true;
729 
730   // If this is a type than can be sign or zero-extended to a basic operation
731   // go ahead and accept it now.
732   if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
733     return true;
734 
735   return false;
736 }
737 
738 // Computes the address to get to an object.
739 bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
740   // Some boilerplate from the X86 FastISel.
741   const User *U = NULL;
742   unsigned Opcode = Instruction::UserOp1;
743   if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
744     // Don't walk into other basic blocks unless the object is an alloca from
745     // another block, otherwise it may not have a virtual register assigned.
746     if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
747         FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
748       Opcode = I->getOpcode();
749       U = I;
750     }
751   } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
752     Opcode = C->getOpcode();
753     U = C;
754   }
755 
756   if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
757     if (Ty->getAddressSpace() > 255)
758       // Fast instruction selection doesn't support the special
759       // address spaces.
760       return false;
761 
762   switch (Opcode) {
763     default:
764     break;
765     case Instruction::BitCast: {
766       // Look through bitcasts.
767       return ARMComputeAddress(U->getOperand(0), Addr);
768     }
769     case Instruction::IntToPtr: {
770       // Look past no-op inttoptrs.
771       if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
772         return ARMComputeAddress(U->getOperand(0), Addr);
773       break;
774     }
775     case Instruction::PtrToInt: {
776       // Look past no-op ptrtoints.
777       if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
778         return ARMComputeAddress(U->getOperand(0), Addr);
779       break;
780     }
781     case Instruction::GetElementPtr: {
782       Address SavedAddr = Addr;
783       int TmpOffset = Addr.Offset;
784 
785       // Iterate through the GEP folding the constants into offsets where
786       // we can.
787       gep_type_iterator GTI = gep_type_begin(U);
788       for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
789            i != e; ++i, ++GTI) {
790         const Value *Op = *i;
791         if (StructType *STy = dyn_cast<StructType>(*GTI)) {
792           const StructLayout *SL = TD.getStructLayout(STy);
793           unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
794           TmpOffset += SL->getElementOffset(Idx);
795         } else {
796           uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
797           for (;;) {
798             if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
799               // Constant-offset addressing.
800               TmpOffset += CI->getSExtValue() * S;
801               break;
802             }
803             if (isa<AddOperator>(Op) &&
804                 (!isa<Instruction>(Op) ||
805                  FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
806                  == FuncInfo.MBB) &&
807                 isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
808               // An add (in the same block) with a constant operand. Fold the
809               // constant.
810               ConstantInt *CI =
811               cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
812               TmpOffset += CI->getSExtValue() * S;
813               // Iterate on the other operand.
814               Op = cast<AddOperator>(Op)->getOperand(0);
815               continue;
816             }
817             // Unsupported
818             goto unsupported_gep;
819           }
820         }
821       }
822 
823       // Try to grab the base operand now.
824       Addr.Offset = TmpOffset;
825       if (ARMComputeAddress(U->getOperand(0), Addr)) return true;
826 
827       // We failed, restore everything and try the other options.
828       Addr = SavedAddr;
829 
830       unsupported_gep:
831       break;
832     }
833     case Instruction::Alloca: {
834       const AllocaInst *AI = cast<AllocaInst>(Obj);
835       DenseMap<const AllocaInst*, int>::iterator SI =
836         FuncInfo.StaticAllocaMap.find(AI);
837       if (SI != FuncInfo.StaticAllocaMap.end()) {
838         Addr.BaseType = Address::FrameIndexBase;
839         Addr.Base.FI = SI->second;
840         return true;
841       }
842       break;
843     }
844   }
845 
846   // Materialize the global variable's address into a reg which can
847   // then be used later to load the variable.
848   if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
849     unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
850     if (Tmp == 0) return false;
851 
852     Addr.Base.Reg = Tmp;
853     return true;
854   }
855 
856   // Try to get this in a register if nothing else has worked.
857   if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
858   return Addr.Base.Reg != 0;
859 }
860 
861 void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
862 
863   assert(VT.isSimple() && "Non-simple types are invalid here!");
864 
865   bool needsLowering = false;
866   switch (VT.getSimpleVT().SimpleTy) {
867     default:
868       assert(false && "Unhandled load/store type!");
869       break;
870     case MVT::i1:
871     case MVT::i8:
872     case MVT::i16:
873     case MVT::i32:
874       if (!useAM3) {
875         // Integer loads/stores handle 12-bit offsets.
876         needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
877         // Handle negative offsets.
878         if (needsLowering && isThumb2)
879           needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
880                             Addr.Offset > -256);
881       } else {
882         // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
883         needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
884       }
885       break;
886     case MVT::f32:
887     case MVT::f64:
888       // Floating point operands handle 8-bit offsets.
889       needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
890       break;
891   }
892 
893   // If this is a stack pointer and the offset needs to be simplified then
894   // put the alloca address into a register, set the base type back to
895   // register and continue. This should almost never happen.
896   if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
897     TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass :
898                               ARM::GPRRegisterClass;
899     unsigned ResultReg = createResultReg(RC);
900     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
901     AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
902                             TII.get(Opc), ResultReg)
903                             .addFrameIndex(Addr.Base.FI)
904                             .addImm(0));
905     Addr.Base.Reg = ResultReg;
906     Addr.BaseType = Address::RegBase;
907   }
908 
909   // Since the offset is too large for the load/store instruction
910   // get the reg+offset into a register.
911   if (needsLowering) {
912     Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
913                                  /*Op0IsKill*/false, Addr.Offset, MVT::i32);
914     Addr.Offset = 0;
915   }
916 }
917 
918 void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
919                                        const MachineInstrBuilder &MIB,
920                                        unsigned Flags, bool useAM3) {
921   // addrmode5 output depends on the selection dag addressing dividing the
922   // offset by 4 that it then later multiplies. Do this here as well.
923   if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
924       VT.getSimpleVT().SimpleTy == MVT::f64)
925     Addr.Offset /= 4;
926 
927   // Frame base works a bit differently. Handle it separately.
928   if (Addr.BaseType == Address::FrameIndexBase) {
929     int FI = Addr.Base.FI;
930     int Offset = Addr.Offset;
931     MachineMemOperand *MMO =
932           FuncInfo.MF->getMachineMemOperand(
933                                   MachinePointerInfo::getFixedStack(FI, Offset),
934                                   Flags,
935                                   MFI.getObjectSize(FI),
936                                   MFI.getObjectAlignment(FI));
937     // Now add the rest of the operands.
938     MIB.addFrameIndex(FI);
939 
940     // ARM halfword load/stores and signed byte loads need an additional operand.
941     if (useAM3) {
942       signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
943       MIB.addReg(0);
944       MIB.addImm(Imm);
945     } else {
946       MIB.addImm(Addr.Offset);
947     }
948     MIB.addMemOperand(MMO);
949   } else {
950     // Now add the rest of the operands.
951     MIB.addReg(Addr.Base.Reg);
952 
953     // ARM halfword load/stores and signed byte loads need an additional operand.
954     if (useAM3) {
955       signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
956       MIB.addReg(0);
957       MIB.addImm(Imm);
958     } else {
959       MIB.addImm(Addr.Offset);
960     }
961   }
962   AddOptionalDefs(MIB);
963 }
964 
965 bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
966                               bool isZExt = true, bool allocReg = true) {
967   assert(VT.isSimple() && "Non-simple types are invalid here!");
968   unsigned Opc;
969   bool useAM3 = false;
970   TargetRegisterClass *RC;
971   switch (VT.getSimpleVT().SimpleTy) {
972     // This is mostly going to be Neon/vector support.
973     default: return false;
974     case MVT::i1:
975     case MVT::i8:
976       if (isThumb2) {
977         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
978           Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
979         else
980           Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
981       } else {
982         if (isZExt) {
983           Opc = ARM::LDRBi12;
984         } else {
985           Opc = ARM::LDRSB;
986           useAM3 = true;
987         }
988       }
989       RC = ARM::GPRRegisterClass;
990       break;
991     case MVT::i16:
992       if (isThumb2) {
993         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
994           Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
995         else
996           Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
997       } else {
998         Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
999         useAM3 = true;
1000       }
1001       RC = ARM::GPRRegisterClass;
1002       break;
1003     case MVT::i32:
1004       if (isThumb2) {
1005         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1006           Opc = ARM::t2LDRi8;
1007         else
1008           Opc = ARM::t2LDRi12;
1009       } else {
1010         Opc = ARM::LDRi12;
1011       }
1012       RC = ARM::GPRRegisterClass;
1013       break;
1014     case MVT::f32:
1015       Opc = ARM::VLDRS;
1016       RC = TLI.getRegClassFor(VT);
1017       break;
1018     case MVT::f64:
1019       Opc = ARM::VLDRD;
1020       RC = TLI.getRegClassFor(VT);
1021       break;
1022   }
1023   // Simplify this down to something we can handle.
1024   ARMSimplifyAddress(Addr, VT, useAM3);
1025 
1026   // Create the base instruction, then add the operands.
1027   if (allocReg)
1028     ResultReg = createResultReg(RC);
1029   assert (ResultReg > 255 && "Expected an allocated virtual register.");
1030   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1031                                     TII.get(Opc), ResultReg);
1032   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
1033   return true;
1034 }
1035 
1036 bool ARMFastISel::SelectLoad(const Instruction *I) {
1037   // Atomic loads need special handling.
1038   if (cast<LoadInst>(I)->isAtomic())
1039     return false;
1040 
1041   // Verify we have a legal type before going any further.
1042   MVT VT;
1043   if (!isLoadTypeLegal(I->getType(), VT))
1044     return false;
1045 
1046   // See if we can handle this address.
1047   Address Addr;
1048   if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
1049 
1050   unsigned ResultReg;
1051   if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
1052   UpdateValueMap(I, ResultReg);
1053   return true;
1054 }
1055 
1056 bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
1057   unsigned StrOpc;
1058   bool useAM3 = false;
1059   switch (VT.getSimpleVT().SimpleTy) {
1060     // This is mostly going to be Neon/vector support.
1061     default: return false;
1062     case MVT::i1: {
1063       unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass :
1064                                                ARM::GPRRegisterClass);
1065       unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
1066       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1067                               TII.get(Opc), Res)
1068                       .addReg(SrcReg).addImm(1));
1069       SrcReg = Res;
1070     } // Fallthrough here.
1071     case MVT::i8:
1072       if (isThumb2) {
1073         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1074           StrOpc = ARM::t2STRBi8;
1075         else
1076           StrOpc = ARM::t2STRBi12;
1077       } else {
1078         StrOpc = ARM::STRBi12;
1079       }
1080       break;
1081     case MVT::i16:
1082       if (isThumb2) {
1083         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1084           StrOpc = ARM::t2STRHi8;
1085         else
1086           StrOpc = ARM::t2STRHi12;
1087       } else {
1088         StrOpc = ARM::STRH;
1089         useAM3 = true;
1090       }
1091       break;
1092     case MVT::i32:
1093       if (isThumb2) {
1094         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1095           StrOpc = ARM::t2STRi8;
1096         else
1097           StrOpc = ARM::t2STRi12;
1098       } else {
1099         StrOpc = ARM::STRi12;
1100       }
1101       break;
1102     case MVT::f32:
1103       if (!Subtarget->hasVFP2()) return false;
1104       StrOpc = ARM::VSTRS;
1105       break;
1106     case MVT::f64:
1107       if (!Subtarget->hasVFP2()) return false;
1108       StrOpc = ARM::VSTRD;
1109       break;
1110   }
1111   // Simplify this down to something we can handle.
1112   ARMSimplifyAddress(Addr, VT, useAM3);
1113 
1114   // Create the base instruction, then add the operands.
1115   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1116                                     TII.get(StrOpc))
1117                             .addReg(SrcReg);
1118   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
1119   return true;
1120 }
1121 
1122 bool ARMFastISel::SelectStore(const Instruction *I) {
1123   Value *Op0 = I->getOperand(0);
1124   unsigned SrcReg = 0;
1125 
1126   // Atomic stores need special handling.
1127   if (cast<StoreInst>(I)->isAtomic())
1128     return false;
1129 
1130   // Verify we have a legal type before going any further.
1131   MVT VT;
1132   if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
1133     return false;
1134 
1135   // Get the value to be stored into a register.
1136   SrcReg = getRegForValue(Op0);
1137   if (SrcReg == 0) return false;
1138 
1139   // See if we can handle this address.
1140   Address Addr;
1141   if (!ARMComputeAddress(I->getOperand(1), Addr))
1142     return false;
1143 
1144   if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
1145   return true;
1146 }
1147 
1148 static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
1149   switch (Pred) {
1150     // Needs two compares...
1151     case CmpInst::FCMP_ONE:
1152     case CmpInst::FCMP_UEQ:
1153     default:
1154       // AL is our "false" for now. The other two need more compares.
1155       return ARMCC::AL;
1156     case CmpInst::ICMP_EQ:
1157     case CmpInst::FCMP_OEQ:
1158       return ARMCC::EQ;
1159     case CmpInst::ICMP_SGT:
1160     case CmpInst::FCMP_OGT:
1161       return ARMCC::GT;
1162     case CmpInst::ICMP_SGE:
1163     case CmpInst::FCMP_OGE:
1164       return ARMCC::GE;
1165     case CmpInst::ICMP_UGT:
1166     case CmpInst::FCMP_UGT:
1167       return ARMCC::HI;
1168     case CmpInst::FCMP_OLT:
1169       return ARMCC::MI;
1170     case CmpInst::ICMP_ULE:
1171     case CmpInst::FCMP_OLE:
1172       return ARMCC::LS;
1173     case CmpInst::FCMP_ORD:
1174       return ARMCC::VC;
1175     case CmpInst::FCMP_UNO:
1176       return ARMCC::VS;
1177     case CmpInst::FCMP_UGE:
1178       return ARMCC::PL;
1179     case CmpInst::ICMP_SLT:
1180     case CmpInst::FCMP_ULT:
1181       return ARMCC::LT;
1182     case CmpInst::ICMP_SLE:
1183     case CmpInst::FCMP_ULE:
1184       return ARMCC::LE;
1185     case CmpInst::FCMP_UNE:
1186     case CmpInst::ICMP_NE:
1187       return ARMCC::NE;
1188     case CmpInst::ICMP_UGE:
1189       return ARMCC::HS;
1190     case CmpInst::ICMP_ULT:
1191       return ARMCC::LO;
1192   }
1193 }
1194 
1195 bool ARMFastISel::SelectBranch(const Instruction *I) {
1196   const BranchInst *BI = cast<BranchInst>(I);
1197   MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
1198   MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
1199 
1200   // Simple branch support.
1201 
1202   // If we can, avoid recomputing the compare - redoing it could lead to wonky
1203   // behavior.
1204   if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
1205     if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
1206 
1207       // Get the compare predicate.
1208       // Try to take advantage of fallthrough opportunities.
1209       CmpInst::Predicate Predicate = CI->getPredicate();
1210       if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1211         std::swap(TBB, FBB);
1212         Predicate = CmpInst::getInversePredicate(Predicate);
1213       }
1214 
1215       ARMCC::CondCodes ARMPred = getComparePred(Predicate);
1216 
1217       // We may not handle every CC for now.
1218       if (ARMPred == ARMCC::AL) return false;
1219 
1220       // Emit the compare.
1221       if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
1222         return false;
1223 
1224       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1225       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
1226       .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
1227       FastEmitBranch(FBB, DL);
1228       FuncInfo.MBB->addSuccessor(TBB);
1229       return true;
1230     }
1231   } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1232     MVT SourceVT;
1233     if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
1234         (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
1235       unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1236       unsigned OpReg = getRegForValue(TI->getOperand(0));
1237       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1238                               TII.get(TstOpc))
1239                       .addReg(OpReg).addImm(1));
1240 
1241       unsigned CCMode = ARMCC::NE;
1242       if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1243         std::swap(TBB, FBB);
1244         CCMode = ARMCC::EQ;
1245       }
1246 
1247       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1248       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
1249       .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1250 
1251       FastEmitBranch(FBB, DL);
1252       FuncInfo.MBB->addSuccessor(TBB);
1253       return true;
1254     }
1255   } else if (const ConstantInt *CI =
1256              dyn_cast<ConstantInt>(BI->getCondition())) {
1257     uint64_t Imm = CI->getZExtValue();
1258     MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
1259     FastEmitBranch(Target, DL);
1260     return true;
1261   }
1262 
1263   unsigned CmpReg = getRegForValue(BI->getCondition());
1264   if (CmpReg == 0) return false;
1265 
1266   // We've been divorced from our compare!  Our block was split, and
1267   // now our compare lives in a predecessor block.  We musn't
1268   // re-compare here, as the children of the compare aren't guaranteed
1269   // live across the block boundary (we *could* check for this).
1270   // Regardless, the compare has been done in the predecessor block,
1271   // and it left a value for us in a virtual register.  Ergo, we test
1272   // the one-bit value left in the virtual register.
1273   unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1274   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
1275                   .addReg(CmpReg).addImm(1));
1276 
1277   unsigned CCMode = ARMCC::NE;
1278   if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1279     std::swap(TBB, FBB);
1280     CCMode = ARMCC::EQ;
1281   }
1282 
1283   unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1284   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
1285                   .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1286   FastEmitBranch(FBB, DL);
1287   FuncInfo.MBB->addSuccessor(TBB);
1288   return true;
1289 }
1290 
1291 bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
1292                              bool isZExt) {
1293   Type *Ty = Src1Value->getType();
1294   EVT SrcVT = TLI.getValueType(Ty, true);
1295   if (!SrcVT.isSimple()) return false;
1296 
1297   bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
1298   if (isFloat && !Subtarget->hasVFP2())
1299     return false;
1300 
1301   // Check to see if the 2nd operand is a constant that we can encode directly
1302   // in the compare.
1303   int Imm = 0;
1304   bool UseImm = false;
1305   bool isNegativeImm = false;
1306   // FIXME: At -O0 we don't have anything that canonicalizes operand order.
1307   // Thus, Src1Value may be a ConstantInt, but we're missing it.
1308   if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
1309     if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
1310         SrcVT == MVT::i1) {
1311       const APInt &CIVal = ConstInt->getValue();
1312       Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
1313       if (Imm < 0) {
1314         isNegativeImm = true;
1315         Imm = -Imm;
1316       }
1317       UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1318         (ARM_AM::getSOImmVal(Imm) != -1);
1319     }
1320   } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
1321     if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
1322       if (ConstFP->isZero() && !ConstFP->isNegative())
1323         UseImm = true;
1324   }
1325 
1326   unsigned CmpOpc;
1327   bool isICmp = true;
1328   bool needsExt = false;
1329   switch (SrcVT.getSimpleVT().SimpleTy) {
1330     default: return false;
1331     // TODO: Verify compares.
1332     case MVT::f32:
1333       isICmp = false;
1334       CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
1335       break;
1336     case MVT::f64:
1337       isICmp = false;
1338       CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
1339       break;
1340     case MVT::i1:
1341     case MVT::i8:
1342     case MVT::i16:
1343       needsExt = true;
1344     // Intentional fall-through.
1345     case MVT::i32:
1346       if (isThumb2) {
1347         if (!UseImm)
1348           CmpOpc = ARM::t2CMPrr;
1349         else
1350           CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
1351       } else {
1352         if (!UseImm)
1353           CmpOpc = ARM::CMPrr;
1354         else
1355           CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri;
1356       }
1357       break;
1358   }
1359 
1360   unsigned SrcReg1 = getRegForValue(Src1Value);
1361   if (SrcReg1 == 0) return false;
1362 
1363   unsigned SrcReg2 = 0;
1364   if (!UseImm) {
1365     SrcReg2 = getRegForValue(Src2Value);
1366     if (SrcReg2 == 0) return false;
1367   }
1368 
1369   // We have i1, i8, or i16, we need to either zero extend or sign extend.
1370   if (needsExt) {
1371     unsigned ResultReg;
1372     ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
1373     if (ResultReg == 0) return false;
1374     SrcReg1 = ResultReg;
1375     if (!UseImm) {
1376       ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
1377       if (ResultReg == 0) return false;
1378       SrcReg2 = ResultReg;
1379     }
1380   }
1381 
1382   if (!UseImm) {
1383     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1384                             TII.get(CmpOpc))
1385                     .addReg(SrcReg1).addReg(SrcReg2));
1386   } else {
1387     MachineInstrBuilder MIB;
1388     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
1389       .addReg(SrcReg1);
1390 
1391     // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
1392     if (isICmp)
1393       MIB.addImm(Imm);
1394     AddOptionalDefs(MIB);
1395   }
1396 
1397   // For floating point we need to move the result to a comparison register
1398   // that we can then use for branches.
1399   if (Ty->isFloatTy() || Ty->isDoubleTy())
1400     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1401                             TII.get(ARM::FMSTAT)));
1402   return true;
1403 }
1404 
1405 bool ARMFastISel::SelectCmp(const Instruction *I) {
1406   const CmpInst *CI = cast<CmpInst>(I);
1407   Type *Ty = CI->getOperand(0)->getType();
1408 
1409   // Get the compare predicate.
1410   ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
1411 
1412   // We may not handle every CC for now.
1413   if (ARMPred == ARMCC::AL) return false;
1414 
1415   // Emit the compare.
1416   if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
1417     return false;
1418 
1419   // Now set a register based on the comparison. Explicitly set the predicates
1420   // here.
1421   unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1422   TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
1423                                     : ARM::GPRRegisterClass;
1424   unsigned DestReg = createResultReg(RC);
1425   Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
1426   unsigned ZeroReg = TargetMaterializeConstant(Zero);
1427   bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
1428   unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR;
1429   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
1430           .addReg(ZeroReg).addImm(1)
1431           .addImm(ARMPred).addReg(CondReg);
1432 
1433   UpdateValueMap(I, DestReg);
1434   return true;
1435 }
1436 
1437 bool ARMFastISel::SelectFPExt(const Instruction *I) {
1438   // Make sure we have VFP and that we're extending float to double.
1439   if (!Subtarget->hasVFP2()) return false;
1440 
1441   Value *V = I->getOperand(0);
1442   if (!I->getType()->isDoubleTy() ||
1443       !V->getType()->isFloatTy()) return false;
1444 
1445   unsigned Op = getRegForValue(V);
1446   if (Op == 0) return false;
1447 
1448   unsigned Result = createResultReg(ARM::DPRRegisterClass);
1449   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1450                           TII.get(ARM::VCVTDS), Result)
1451                   .addReg(Op));
1452   UpdateValueMap(I, Result);
1453   return true;
1454 }
1455 
1456 bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
1457   // Make sure we have VFP and that we're truncating double to float.
1458   if (!Subtarget->hasVFP2()) return false;
1459 
1460   Value *V = I->getOperand(0);
1461   if (!(I->getType()->isFloatTy() &&
1462         V->getType()->isDoubleTy())) return false;
1463 
1464   unsigned Op = getRegForValue(V);
1465   if (Op == 0) return false;
1466 
1467   unsigned Result = createResultReg(ARM::SPRRegisterClass);
1468   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1469                           TII.get(ARM::VCVTSD), Result)
1470                   .addReg(Op));
1471   UpdateValueMap(I, Result);
1472   return true;
1473 }
1474 
1475 bool ARMFastISel::SelectSIToFP(const Instruction *I) {
1476   // Make sure we have VFP.
1477   if (!Subtarget->hasVFP2()) return false;
1478 
1479   MVT DstVT;
1480   Type *Ty = I->getType();
1481   if (!isTypeLegal(Ty, DstVT))
1482     return false;
1483 
1484   Value *Src = I->getOperand(0);
1485   EVT SrcVT = TLI.getValueType(Src->getType(), true);
1486   if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1487     return false;
1488 
1489   unsigned SrcReg = getRegForValue(Src);
1490   if (SrcReg == 0) return false;
1491 
1492   // Handle sign-extension.
1493   if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
1494     EVT DestVT = MVT::i32;
1495     unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, /*isZExt*/ false);
1496     if (ResultReg == 0) return false;
1497     SrcReg = ResultReg;
1498   }
1499 
1500   // The conversion routine works on fp-reg to fp-reg and the operand above
1501   // was an integer, move it to the fp registers if possible.
1502   unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
1503   if (FP == 0) return false;
1504 
1505   unsigned Opc;
1506   if (Ty->isFloatTy()) Opc = ARM::VSITOS;
1507   else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
1508   else return false;
1509 
1510   unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
1511   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
1512                           ResultReg)
1513                   .addReg(FP));
1514   UpdateValueMap(I, ResultReg);
1515   return true;
1516 }
1517 
1518 bool ARMFastISel::SelectFPToSI(const Instruction *I) {
1519   // Make sure we have VFP.
1520   if (!Subtarget->hasVFP2()) return false;
1521 
1522   MVT DstVT;
1523   Type *RetTy = I->getType();
1524   if (!isTypeLegal(RetTy, DstVT))
1525     return false;
1526 
1527   unsigned Op = getRegForValue(I->getOperand(0));
1528   if (Op == 0) return false;
1529 
1530   unsigned Opc;
1531   Type *OpTy = I->getOperand(0)->getType();
1532   if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
1533   else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
1534   else return false;
1535 
1536   // f64->s32 or f32->s32 both need an intermediate f32 reg.
1537   unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1538   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
1539                           ResultReg)
1540                   .addReg(Op));
1541 
1542   // This result needs to be in an integer register, but the conversion only
1543   // takes place in fp-regs.
1544   unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
1545   if (IntReg == 0) return false;
1546 
1547   UpdateValueMap(I, IntReg);
1548   return true;
1549 }
1550 
1551 bool ARMFastISel::SelectSelect(const Instruction *I) {
1552   MVT VT;
1553   if (!isTypeLegal(I->getType(), VT))
1554     return false;
1555 
1556   // Things need to be register sized for register moves.
1557   if (VT != MVT::i32) return false;
1558   const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
1559 
1560   unsigned CondReg = getRegForValue(I->getOperand(0));
1561   if (CondReg == 0) return false;
1562   unsigned Op1Reg = getRegForValue(I->getOperand(1));
1563   if (Op1Reg == 0) return false;
1564 
1565   // Check to see if we can use an immediate in the conditional move.
1566   int Imm = 0;
1567   bool UseImm = false;
1568   bool isNegativeImm = false;
1569   if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
1570     assert (VT == MVT::i32 && "Expecting an i32.");
1571     Imm = (int)ConstInt->getValue().getZExtValue();
1572     if (Imm < 0) {
1573       isNegativeImm = true;
1574       Imm = ~Imm;
1575     }
1576     UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1577       (ARM_AM::getSOImmVal(Imm) != -1);
1578   }
1579 
1580   unsigned Op2Reg = 0;
1581   if (!UseImm) {
1582     Op2Reg = getRegForValue(I->getOperand(2));
1583     if (Op2Reg == 0) return false;
1584   }
1585 
1586   unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
1587   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
1588                   .addReg(CondReg).addImm(0));
1589 
1590   unsigned MovCCOpc;
1591   if (!UseImm) {
1592     MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1593   } else {
1594     if (!isNegativeImm) {
1595       MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1596     } else {
1597       MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
1598     }
1599   }
1600   unsigned ResultReg = createResultReg(RC);
1601   if (!UseImm)
1602     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
1603     .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
1604   else
1605     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
1606     .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
1607   UpdateValueMap(I, ResultReg);
1608   return true;
1609 }
1610 
1611 bool ARMFastISel::SelectSDiv(const Instruction *I) {
1612   MVT VT;
1613   Type *Ty = I->getType();
1614   if (!isTypeLegal(Ty, VT))
1615     return false;
1616 
1617   // If we have integer div support we should have selected this automagically.
1618   // In case we have a real miss go ahead and return false and we'll pick
1619   // it up later.
1620   if (Subtarget->hasDivide()) return false;
1621 
1622   // Otherwise emit a libcall.
1623   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1624   if (VT == MVT::i8)
1625     LC = RTLIB::SDIV_I8;
1626   else if (VT == MVT::i16)
1627     LC = RTLIB::SDIV_I16;
1628   else if (VT == MVT::i32)
1629     LC = RTLIB::SDIV_I32;
1630   else if (VT == MVT::i64)
1631     LC = RTLIB::SDIV_I64;
1632   else if (VT == MVT::i128)
1633     LC = RTLIB::SDIV_I128;
1634   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
1635 
1636   return ARMEmitLibcall(I, LC);
1637 }
1638 
1639 bool ARMFastISel::SelectSRem(const Instruction *I) {
1640   MVT VT;
1641   Type *Ty = I->getType();
1642   if (!isTypeLegal(Ty, VT))
1643     return false;
1644 
1645   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1646   if (VT == MVT::i8)
1647     LC = RTLIB::SREM_I8;
1648   else if (VT == MVT::i16)
1649     LC = RTLIB::SREM_I16;
1650   else if (VT == MVT::i32)
1651     LC = RTLIB::SREM_I32;
1652   else if (VT == MVT::i64)
1653     LC = RTLIB::SREM_I64;
1654   else if (VT == MVT::i128)
1655     LC = RTLIB::SREM_I128;
1656   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
1657 
1658   return ARMEmitLibcall(I, LC);
1659 }
1660 
1661 bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
1662   EVT VT  = TLI.getValueType(I->getType(), true);
1663 
1664   // We can get here in the case when we want to use NEON for our fp
1665   // operations, but can't figure out how to. Just use the vfp instructions
1666   // if we have them.
1667   // FIXME: It'd be nice to use NEON instructions.
1668   Type *Ty = I->getType();
1669   bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
1670   if (isFloat && !Subtarget->hasVFP2())
1671     return false;
1672 
1673   unsigned Opc;
1674   bool is64bit = VT == MVT::f64 || VT == MVT::i64;
1675   switch (ISDOpcode) {
1676     default: return false;
1677     case ISD::FADD:
1678       Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1679       break;
1680     case ISD::FSUB:
1681       Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1682       break;
1683     case ISD::FMUL:
1684       Opc = is64bit ? ARM::VMULD : ARM::VMULS;
1685       break;
1686   }
1687   unsigned Op1 = getRegForValue(I->getOperand(0));
1688   if (Op1 == 0) return false;
1689 
1690   unsigned Op2 = getRegForValue(I->getOperand(1));
1691   if (Op2 == 0) return false;
1692 
1693   unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
1694   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1695                           TII.get(Opc), ResultReg)
1696                   .addReg(Op1).addReg(Op2));
1697   UpdateValueMap(I, ResultReg);
1698   return true;
1699 }
1700 
1701 // Call Handling Code
1702 
1703 // This is largely taken directly from CCAssignFnForNode - we don't support
1704 // varargs in FastISel so that part has been removed.
1705 // TODO: We may not support all of this.
1706 CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
1707   switch (CC) {
1708   default:
1709     llvm_unreachable("Unsupported calling convention");
1710   case CallingConv::Fast:
1711     // Ignore fastcc. Silence compiler warnings.
1712     (void)RetFastCC_ARM_APCS;
1713     (void)FastCC_ARM_APCS;
1714     // Fallthrough
1715   case CallingConv::C:
1716     // Use target triple & subtarget features to do actual dispatch.
1717     if (Subtarget->isAAPCS_ABI()) {
1718       if (Subtarget->hasVFP2() &&
1719           FloatABIType == FloatABI::Hard)
1720         return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1721       else
1722         return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1723     } else
1724         return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1725   case CallingConv::ARM_AAPCS_VFP:
1726     return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1727   case CallingConv::ARM_AAPCS:
1728     return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1729   case CallingConv::ARM_APCS:
1730     return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1731   }
1732 }
1733 
1734 bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
1735                                   SmallVectorImpl<unsigned> &ArgRegs,
1736                                   SmallVectorImpl<MVT> &ArgVTs,
1737                                   SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
1738                                   SmallVectorImpl<unsigned> &RegArgs,
1739                                   CallingConv::ID CC,
1740                                   unsigned &NumBytes) {
1741   SmallVector<CCValAssign, 16> ArgLocs;
1742   CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
1743   CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));
1744 
1745   // Get a count of how many bytes are to be pushed on the stack.
1746   NumBytes = CCInfo.getNextStackOffset();
1747 
1748   // Issue CALLSEQ_START
1749   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
1750   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1751                           TII.get(AdjStackDown))
1752                   .addImm(NumBytes));
1753 
1754   // Process the args.
1755   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1756     CCValAssign &VA = ArgLocs[i];
1757     unsigned Arg = ArgRegs[VA.getValNo()];
1758     MVT ArgVT = ArgVTs[VA.getValNo()];
1759 
1760     // We don't handle NEON/vector parameters yet.
1761     if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
1762       return false;
1763 
1764     // Handle arg promotion, etc.
1765     switch (VA.getLocInfo()) {
1766       case CCValAssign::Full: break;
1767       case CCValAssign::SExt: {
1768         EVT DestVT = VA.getLocVT();
1769         unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
1770                                            /*isZExt*/false);
1771         assert (ResultReg != 0 && "Failed to emit a sext");
1772         Arg = ResultReg;
1773         break;
1774       }
1775       case CCValAssign::AExt:
1776         // Intentional fall-through.  Handle AExt and ZExt.
1777       case CCValAssign::ZExt: {
1778         EVT DestVT = VA.getLocVT();
1779         unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
1780                                            /*isZExt*/true);
1781         assert (ResultReg != 0 && "Failed to emit a sext");
1782         Arg = ResultReg;
1783         break;
1784       }
1785       case CCValAssign::BCvt: {
1786         unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
1787                                  /*TODO: Kill=*/false);
1788         assert(BC != 0 && "Failed to emit a bitcast!");
1789         Arg = BC;
1790         ArgVT = VA.getLocVT();
1791         break;
1792       }
1793       default: llvm_unreachable("Unknown arg promotion!");
1794     }
1795 
1796     // Now copy/store arg to correct locations.
1797     if (VA.isRegLoc() && !VA.needsCustom()) {
1798       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1799               VA.getLocReg())
1800         .addReg(Arg);
1801       RegArgs.push_back(VA.getLocReg());
1802     } else if (VA.needsCustom()) {
1803       // TODO: We need custom lowering for vector (v2f64) args.
1804       if (VA.getLocVT() != MVT::f64) return false;
1805 
1806       CCValAssign &NextVA = ArgLocs[++i];
1807 
1808       // TODO: Only handle register args for now.
1809       if(!(VA.isRegLoc() && NextVA.isRegLoc())) return false;
1810 
1811       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1812                               TII.get(ARM::VMOVRRD), VA.getLocReg())
1813                       .addReg(NextVA.getLocReg(), RegState::Define)
1814                       .addReg(Arg));
1815       RegArgs.push_back(VA.getLocReg());
1816       RegArgs.push_back(NextVA.getLocReg());
1817     } else {
1818       assert(VA.isMemLoc());
1819       // Need to store on the stack.
1820       Address Addr;
1821       Addr.BaseType = Address::RegBase;
1822       Addr.Base.Reg = ARM::SP;
1823       Addr.Offset = VA.getLocMemOffset();
1824 
1825       if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
1826     }
1827   }
1828   return true;
1829 }
1830 
1831 bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
1832                              const Instruction *I, CallingConv::ID CC,
1833                              unsigned &NumBytes) {
1834   // Issue CALLSEQ_END
1835   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
1836   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1837                           TII.get(AdjStackUp))
1838                   .addImm(NumBytes).addImm(0));
1839 
1840   // Now the return value.
1841   if (RetVT != MVT::isVoid) {
1842     SmallVector<CCValAssign, 16> RVLocs;
1843     CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
1844     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));
1845 
1846     // Copy all of the result registers out of their specified physreg.
1847     if (RVLocs.size() == 2 && RetVT == MVT::f64) {
1848       // For this move we copy into two registers and then move into the
1849       // double fp reg we want.
1850       EVT DestVT = RVLocs[0].getValVT();
1851       TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
1852       unsigned ResultReg = createResultReg(DstRC);
1853       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1854                               TII.get(ARM::VMOVDRR), ResultReg)
1855                       .addReg(RVLocs[0].getLocReg())
1856                       .addReg(RVLocs[1].getLocReg()));
1857 
1858       UsedRegs.push_back(RVLocs[0].getLocReg());
1859       UsedRegs.push_back(RVLocs[1].getLocReg());
1860 
1861       // Finally update the result.
1862       UpdateValueMap(I, ResultReg);
1863     } else {
1864       assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
1865       EVT CopyVT = RVLocs[0].getValVT();
1866 
1867       // Special handling for extended integers.
1868       if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
1869         CopyVT = MVT::i32;
1870 
1871       TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
1872 
1873       unsigned ResultReg = createResultReg(DstRC);
1874       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1875               ResultReg).addReg(RVLocs[0].getLocReg());
1876       UsedRegs.push_back(RVLocs[0].getLocReg());
1877 
1878       // Finally update the result.
1879       UpdateValueMap(I, ResultReg);
1880     }
1881   }
1882 
1883   return true;
1884 }
1885 
1886 bool ARMFastISel::SelectRet(const Instruction *I) {
1887   const ReturnInst *Ret = cast<ReturnInst>(I);
1888   const Function &F = *I->getParent()->getParent();
1889 
1890   if (!FuncInfo.CanLowerReturn)
1891     return false;
1892 
1893   if (F.isVarArg())
1894     return false;
1895 
1896   CallingConv::ID CC = F.getCallingConv();
1897   if (Ret->getNumOperands() > 0) {
1898     SmallVector<ISD::OutputArg, 4> Outs;
1899     GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
1900                   Outs, TLI);
1901 
1902     // Analyze operands of the call, assigning locations to each operand.
1903     SmallVector<CCValAssign, 16> ValLocs;
1904     CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext());
1905     CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));
1906 
1907     const Value *RV = Ret->getOperand(0);
1908     unsigned Reg = getRegForValue(RV);
1909     if (Reg == 0)
1910       return false;
1911 
1912     // Only handle a single return value for now.
1913     if (ValLocs.size() != 1)
1914       return false;
1915 
1916     CCValAssign &VA = ValLocs[0];
1917 
1918     // Don't bother handling odd stuff for now.
1919     if (VA.getLocInfo() != CCValAssign::Full)
1920       return false;
1921     // Only handle register returns for now.
1922     if (!VA.isRegLoc())
1923       return false;
1924 
1925     unsigned SrcReg = Reg + VA.getValNo();
1926     EVT RVVT = TLI.getValueType(RV->getType());
1927     EVT DestVT = VA.getValVT();
1928     // Special handling for extended integers.
1929     if (RVVT != DestVT) {
1930       if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
1931         return false;
1932 
1933       if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
1934         return false;
1935 
1936       assert(DestVT == MVT::i32 && "ARM should always ext to i32");
1937 
1938       bool isZExt = Outs[0].Flags.isZExt();
1939       unsigned ResultReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, isZExt);
1940       if (ResultReg == 0) return false;
1941       SrcReg = ResultReg;
1942     }
1943 
1944     // Make the copy.
1945     unsigned DstReg = VA.getLocReg();
1946     const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
1947     // Avoid a cross-class copy. This is very unlikely.
1948     if (!SrcRC->contains(DstReg))
1949       return false;
1950     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1951             DstReg).addReg(SrcReg);
1952 
1953     // Mark the register as live out of the function.
1954     MRI.addLiveOut(VA.getLocReg());
1955   }
1956 
1957   unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
1958   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1959                           TII.get(RetOpc)));
1960   return true;
1961 }
1962 
1963 unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
1964 
1965   // Darwin needs the r9 versions of the opcodes.
1966   bool isDarwin = Subtarget->isTargetDarwin();
1967   if (isThumb2) {
1968     return isDarwin ? ARM::tBLr9 : ARM::tBL;
1969   } else  {
1970     return isDarwin ? ARM::BLr9 : ARM::BL;
1971   }
1972 }
1973 
1974 // A quick function that will emit a call for a named libcall in F with the
1975 // vector of passed arguments for the Instruction in I. We can assume that we
1976 // can emit a call for any libcall we can produce. This is an abridged version
1977 // of the full call infrastructure since we won't need to worry about things
1978 // like computed function pointers or strange arguments at call sites.
1979 // TODO: Try to unify this and the normal call bits for ARM, then try to unify
1980 // with X86.
1981 bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
1982   CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
1983 
1984   // Handle *simple* calls for now.
1985   Type *RetTy = I->getType();
1986   MVT RetVT;
1987   if (RetTy->isVoidTy())
1988     RetVT = MVT::isVoid;
1989   else if (!isTypeLegal(RetTy, RetVT))
1990     return false;
1991 
1992   // TODO: For now if we have long calls specified we don't handle the call.
1993   if (EnableARMLongCalls) return false;
1994 
1995   // Set up the argument vectors.
1996   SmallVector<Value*, 8> Args;
1997   SmallVector<unsigned, 8> ArgRegs;
1998   SmallVector<MVT, 8> ArgVTs;
1999   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2000   Args.reserve(I->getNumOperands());
2001   ArgRegs.reserve(I->getNumOperands());
2002   ArgVTs.reserve(I->getNumOperands());
2003   ArgFlags.reserve(I->getNumOperands());
2004   for (unsigned i = 0; i < I->getNumOperands(); ++i) {
2005     Value *Op = I->getOperand(i);
2006     unsigned Arg = getRegForValue(Op);
2007     if (Arg == 0) return false;
2008 
2009     Type *ArgTy = Op->getType();
2010     MVT ArgVT;
2011     if (!isTypeLegal(ArgTy, ArgVT)) return false;
2012 
2013     ISD::ArgFlagsTy Flags;
2014     unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
2015     Flags.setOrigAlign(OriginalAlignment);
2016 
2017     Args.push_back(Op);
2018     ArgRegs.push_back(Arg);
2019     ArgVTs.push_back(ArgVT);
2020     ArgFlags.push_back(Flags);
2021   }
2022 
2023   // Handle the arguments now that we've gotten them.
2024   SmallVector<unsigned, 4> RegArgs;
2025   unsigned NumBytes;
2026   if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
2027     return false;
2028 
2029   // Issue the call, BLr9 for darwin, BL otherwise.
2030   // TODO: Turn this into the table of arm call ops.
2031   MachineInstrBuilder MIB;
2032   unsigned CallOpc = ARMSelectCallOp(NULL);
2033   if(isThumb2)
2034     // Explicitly adding the predicate here.
2035     MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2036                          TII.get(CallOpc)))
2037                          .addExternalSymbol(TLI.getLibcallName(Call));
2038   else
2039     // Explicitly adding the predicate here.
2040     MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2041                          TII.get(CallOpc))
2042           .addExternalSymbol(TLI.getLibcallName(Call)));
2043 
2044   // Add implicit physical register uses to the call.
2045   for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
2046     MIB.addReg(RegArgs[i]);
2047 
2048   // Finish off the call including any return values.
2049   SmallVector<unsigned, 4> UsedRegs;
2050   if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
2051 
2052   // Set all unused physreg defs as dead.
2053   static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2054 
2055   return true;
2056 }
2057 
2058 bool ARMFastISel::SelectCall(const Instruction *I,
2059                              const char *IntrMemName = 0) {
2060   const CallInst *CI = cast<CallInst>(I);
2061   const Value *Callee = CI->getCalledValue();
2062 
2063   // Can't handle inline asm.
2064   if (isa<InlineAsm>(Callee)) return false;
2065 
2066   // Only handle global variable Callees.
2067   const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
2068   if (!GV)
2069     return false;
2070 
2071   // Check the calling convention.
2072   ImmutableCallSite CS(CI);
2073   CallingConv::ID CC = CS.getCallingConv();
2074 
2075   // TODO: Avoid some calling conventions?
2076 
2077   // Let SDISel handle vararg functions.
2078   PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
2079   FunctionType *FTy = cast<FunctionType>(PT->getElementType());
2080   if (FTy->isVarArg())
2081     return false;
2082 
2083   // Handle *simple* calls for now.
2084   Type *RetTy = I->getType();
2085   MVT RetVT;
2086   if (RetTy->isVoidTy())
2087     RetVT = MVT::isVoid;
2088   else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
2089            RetVT != MVT::i8  && RetVT != MVT::i1)
2090     return false;
2091 
2092   // TODO: For now if we have long calls specified we don't handle the call.
2093   if (EnableARMLongCalls) return false;
2094 
2095   // Set up the argument vectors.
2096   SmallVector<Value*, 8> Args;
2097   SmallVector<unsigned, 8> ArgRegs;
2098   SmallVector<MVT, 8> ArgVTs;
2099   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2100   Args.reserve(CS.arg_size());
2101   ArgRegs.reserve(CS.arg_size());
2102   ArgVTs.reserve(CS.arg_size());
2103   ArgFlags.reserve(CS.arg_size());
2104   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
2105        i != e; ++i) {
2106     // If we're lowering a memory intrinsic instead of a regular call, skip the
2107     // last two arguments, which shouldn't be passed to the underlying function.
2108     if (IntrMemName && e-i <= 2)
2109       break;
2110 
2111     ISD::ArgFlagsTy Flags;
2112     unsigned AttrInd = i - CS.arg_begin() + 1;
2113     if (CS.paramHasAttr(AttrInd, Attribute::SExt))
2114       Flags.setSExt();
2115     if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
2116       Flags.setZExt();
2117 
2118     // FIXME: Only handle *easy* calls for now.
2119     if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
2120         CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
2121         CS.paramHasAttr(AttrInd, Attribute::Nest) ||
2122         CS.paramHasAttr(AttrInd, Attribute::ByVal))
2123       return false;
2124 
2125     Type *ArgTy = (*i)->getType();
2126     MVT ArgVT;
2127     if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
2128         ArgVT != MVT::i1)
2129       return false;
2130 
2131     unsigned Arg = getRegForValue(*i);
2132     if (Arg == 0)
2133       return false;
2134 
2135     unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
2136     Flags.setOrigAlign(OriginalAlignment);
2137 
2138     Args.push_back(*i);
2139     ArgRegs.push_back(Arg);
2140     ArgVTs.push_back(ArgVT);
2141     ArgFlags.push_back(Flags);
2142   }
2143 
2144   // Handle the arguments now that we've gotten them.
2145   SmallVector<unsigned, 4> RegArgs;
2146   unsigned NumBytes;
2147   if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
2148     return false;
2149 
2150   // Issue the call, BLr9 for darwin, BL otherwise.
2151   // TODO: Turn this into the table of arm call ops.
2152   MachineInstrBuilder MIB;
2153   unsigned CallOpc = ARMSelectCallOp(GV);
2154   // Explicitly adding the predicate here.
2155   if(isThumb2) {
2156     // Explicitly adding the predicate here.
2157     MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2158                                  TII.get(CallOpc)));
2159     if (!IntrMemName)
2160       MIB.addGlobalAddress(GV, 0, 0);
2161     else
2162       MIB.addExternalSymbol(IntrMemName, 0);
2163   } else {
2164     if (!IntrMemName)
2165       // Explicitly adding the predicate here.
2166       MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2167                                    TII.get(CallOpc))
2168             .addGlobalAddress(GV, 0, 0));
2169     else
2170       MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2171                                    TII.get(CallOpc))
2172             .addExternalSymbol(IntrMemName, 0));
2173   }
2174 
2175   // Add implicit physical register uses to the call.
2176   for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
2177     MIB.addReg(RegArgs[i]);
2178 
2179   // Finish off the call including any return values.
2180   SmallVector<unsigned, 4> UsedRegs;
2181   if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
2182 
2183   // Set all unused physreg defs as dead.
2184   static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2185 
2186   return true;
2187 }
2188 
2189 bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
2190   return Len <= 16;
2191 }
2192 
2193 bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len) {
2194   // Make sure we don't bloat code by inlining very large memcpy's.
2195   if (!ARMIsMemCpySmall(Len))
2196     return false;
2197 
2198   // We don't care about alignment here since we just emit integer accesses.
2199   while (Len) {
2200     MVT VT;
2201     if (Len >= 4)
2202       VT = MVT::i32;
2203     else if (Len >= 2)
2204       VT = MVT::i16;
2205     else {
2206       assert(Len == 1);
2207       VT = MVT::i8;
2208     }
2209 
2210     bool RV;
2211     unsigned ResultReg;
2212     RV = ARMEmitLoad(VT, ResultReg, Src);
2213     assert (RV = true && "Should be able to handle this load.");
2214     RV = ARMEmitStore(VT, ResultReg, Dest);
2215     assert (RV = true && "Should be able to handle this store.");
2216 
2217     unsigned Size = VT.getSizeInBits()/8;
2218     Len -= Size;
2219     Dest.Offset += Size;
2220     Src.Offset += Size;
2221   }
2222 
2223   return true;
2224 }
2225 
2226 bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
2227   // FIXME: Handle more intrinsics.
2228   switch (I.getIntrinsicID()) {
2229   default: return false;
2230   case Intrinsic::memcpy:
2231   case Intrinsic::memmove: {
2232     const MemTransferInst &MTI = cast<MemTransferInst>(I);
2233     // Don't handle volatile.
2234     if (MTI.isVolatile())
2235       return false;
2236 
2237     // Disable inlining for memmove before calls to ComputeAddress.  Otherwise,
2238     // we would emit dead code because we don't currently handle memmoves.
2239     bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
2240     if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
2241       // Small memcpy's are common enough that we want to do them without a call
2242       // if possible.
2243       uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
2244       if (ARMIsMemCpySmall(Len)) {
2245         Address Dest, Src;
2246         if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
2247             !ARMComputeAddress(MTI.getRawSource(), Src))
2248           return false;
2249         if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
2250           return true;
2251       }
2252     }
2253 
2254     if (!MTI.getLength()->getType()->isIntegerTy(32))
2255       return false;
2256 
2257     if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
2258       return false;
2259 
2260     const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
2261     return SelectCall(&I, IntrMemName);
2262   }
2263   case Intrinsic::memset: {
2264     const MemSetInst &MSI = cast<MemSetInst>(I);
2265     // Don't handle volatile.
2266     if (MSI.isVolatile())
2267       return false;
2268 
2269     if (!MSI.getLength()->getType()->isIntegerTy(32))
2270       return false;
2271 
2272     if (MSI.getDestAddressSpace() > 255)
2273       return false;
2274 
2275     return SelectCall(&I, "memset");
2276   }
2277   }
2278   return false;
2279 }
2280 
2281 bool ARMFastISel::SelectTrunc(const Instruction *I) {
2282   // The high bits for a type smaller than the register size are assumed to be
2283   // undefined.
2284   Value *Op = I->getOperand(0);
2285 
2286   EVT SrcVT, DestVT;
2287   SrcVT = TLI.getValueType(Op->getType(), true);
2288   DestVT = TLI.getValueType(I->getType(), true);
2289 
2290   if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
2291     return false;
2292   if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
2293     return false;
2294 
2295   unsigned SrcReg = getRegForValue(Op);
2296   if (!SrcReg) return false;
2297 
2298   // Because the high bits are undefined, a truncate doesn't generate
2299   // any code.
2300   UpdateValueMap(I, SrcReg);
2301   return true;
2302 }
2303 
2304 unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
2305                                     bool isZExt) {
2306   if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
2307     return 0;
2308 
2309   unsigned Opc;
2310   bool isBoolZext = false;
2311   if (!SrcVT.isSimple()) return 0;
2312   switch (SrcVT.getSimpleVT().SimpleTy) {
2313   default: return 0;
2314   case MVT::i16:
2315     if (!Subtarget->hasV6Ops()) return 0;
2316     if (isZExt)
2317       Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
2318     else
2319       Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
2320     break;
2321   case MVT::i8:
2322     if (!Subtarget->hasV6Ops()) return 0;
2323     if (isZExt)
2324       Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
2325     else
2326       Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
2327     break;
2328   case MVT::i1:
2329     if (isZExt) {
2330       Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
2331       isBoolZext = true;
2332       break;
2333     }
2334     return 0;
2335   }
2336 
2337   unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
2338   MachineInstrBuilder MIB;
2339   MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
2340         .addReg(SrcReg);
2341   if (isBoolZext)
2342     MIB.addImm(1);
2343   else
2344     MIB.addImm(0);
2345   AddOptionalDefs(MIB);
2346   return ResultReg;
2347 }
2348 
2349 bool ARMFastISel::SelectIntExt(const Instruction *I) {
2350   // On ARM, in general, integer casts don't involve legal types; this code
2351   // handles promotable integers.
2352   Type *DestTy = I->getType();
2353   Value *Src = I->getOperand(0);
2354   Type *SrcTy = Src->getType();
2355 
2356   EVT SrcVT, DestVT;
2357   SrcVT = TLI.getValueType(SrcTy, true);
2358   DestVT = TLI.getValueType(DestTy, true);
2359 
2360   bool isZExt = isa<ZExtInst>(I);
2361   unsigned SrcReg = getRegForValue(Src);
2362   if (!SrcReg) return false;
2363 
2364   unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2365   if (ResultReg == 0) return false;
2366   UpdateValueMap(I, ResultReg);
2367   return true;
2368 }
2369 
2370 // TODO: SoftFP support.
2371 bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
2372 
2373   switch (I->getOpcode()) {
2374     case Instruction::Load:
2375       return SelectLoad(I);
2376     case Instruction::Store:
2377       return SelectStore(I);
2378     case Instruction::Br:
2379       return SelectBranch(I);
2380     case Instruction::ICmp:
2381     case Instruction::FCmp:
2382       return SelectCmp(I);
2383     case Instruction::FPExt:
2384       return SelectFPExt(I);
2385     case Instruction::FPTrunc:
2386       return SelectFPTrunc(I);
2387     case Instruction::SIToFP:
2388       return SelectSIToFP(I);
2389     case Instruction::FPToSI:
2390       return SelectFPToSI(I);
2391     case Instruction::FAdd:
2392       return SelectBinaryOp(I, ISD::FADD);
2393     case Instruction::FSub:
2394       return SelectBinaryOp(I, ISD::FSUB);
2395     case Instruction::FMul:
2396       return SelectBinaryOp(I, ISD::FMUL);
2397     case Instruction::SDiv:
2398       return SelectSDiv(I);
2399     case Instruction::SRem:
2400       return SelectSRem(I);
2401     case Instruction::Call:
2402       if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2403         return SelectIntrinsicCall(*II);
2404       return SelectCall(I);
2405     case Instruction::Select:
2406       return SelectSelect(I);
2407     case Instruction::Ret:
2408       return SelectRet(I);
2409     case Instruction::Trunc:
2410       return SelectTrunc(I);
2411     case Instruction::ZExt:
2412     case Instruction::SExt:
2413       return SelectIntExt(I);
2414     default: break;
2415   }
2416   return false;
2417 }
2418 
2419 /// TryToFoldLoad - The specified machine instr operand is a vreg, and that
2420 /// vreg is being provided by the specified load instruction.  If possible,
2421 /// try to fold the load as an operand to the instruction, returning true if
2422 /// successful.
2423 bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
2424                                 const LoadInst *LI) {
2425   // Verify we have a legal type before going any further.
2426   MVT VT;
2427   if (!isLoadTypeLegal(LI->getType(), VT))
2428     return false;
2429 
2430   // Combine load followed by zero- or sign-extend.
2431   // ldrb r1, [r0]       ldrb r1, [r0]
2432   // uxtb r2, r1     =>
2433   // mov  r3, r2         mov  r3, r1
2434   bool isZExt = true;
2435   switch(MI->getOpcode()) {
2436     default: return false;
2437     case ARM::SXTH:
2438     case ARM::t2SXTH:
2439       isZExt = false;
2440     case ARM::UXTH:
2441     case ARM::t2UXTH:
2442       if (VT != MVT::i16)
2443         return false;
2444     break;
2445     case ARM::SXTB:
2446     case ARM::t2SXTB:
2447       isZExt = false;
2448     case ARM::UXTB:
2449     case ARM::t2UXTB:
2450       if (VT != MVT::i8)
2451         return false;
2452     break;
2453   }
2454   // See if we can handle this address.
2455   Address Addr;
2456   if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
2457 
2458   unsigned ResultReg = MI->getOperand(0).getReg();
2459   if (!ARMEmitLoad(VT, ResultReg, Addr, isZExt, false))
2460     return false;
2461   MI->eraseFromParent();
2462   return true;
2463 }
2464 
2465 namespace llvm {
2466   llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
2467     // Completely untested on non-darwin.
2468     const TargetMachine &TM = funcInfo.MF->getTarget();
2469 
2470     // Darwin and thumb1 only for now.
2471     const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
2472     if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
2473         !DisableARMFastISel)
2474       return new ARMFastISel(funcInfo);
2475     return 0;
2476   }
2477 }
2478