//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
       Base.Reg = 0;
     }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
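    // Note: these overrides mirror the generic FastEmitInst_* helpers in
    // FastISel.cpp; the ARM-specific twist is that every instruction built
    // here is routed through AddOptionalDefs so that default predicate and
    // condition-code operands get appended (see below).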
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);

#include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're in Thumb2 mode or the instruction is not NEON, it was already
  // handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}
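// Note: NEON instructions in ARM mode aren't truly predicable, but their
// operand lists still contain predicate operand slots, so AddOptionalDefs
// must fill in the default (AL) predicate for them as well.
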
// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for a description of why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, we know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
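// Note on the pattern above and below: when the MCInstrDesc declares no
// explicit defs, the instruction produces its result in an implicit physical
// register (II.ImplicitDefs[0]), so an extra COPY is emitted to move that
// value into the virtual ResultReg that FastISel hands back.
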
unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}
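// Note: VFP3's FCONSTS/FCONSTD can only encode a small family of immediates
// (an 8-bit sign/exponent/mantissa form; values like 1.0 and 0.5 qualify,
// 0.1 does not). TLI.isFPImmLegal makes that call below; everything else
// goes through the constant pool.
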
// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool. For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}
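// Note: global addresses are materialized one of three ways below: a
// movw/movt pair when the subtarget supports it, otherwise a constant-pool
// load (PC-relative for PIC), plus an extra indirection load when the GV is
// accessed through an indirect symbol (e.g. a Darwin non-lazy pointer).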
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // Use movw+movt when possible, it avoids constant pool entries.
  // Darwin targets don't support movt with Reloc::Static, see
  // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support
  // static movt relocations.
  if (Subtarget->useMovt() &&
      Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
    unsigned Opc;
    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    case Reloc::DynamicNoPIC:
      Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
        .addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                    DestReg)
        .addConstantPoolIndex(Idx)
        .addImm(0);
    }
    AddOptionalDefs(MIB);
  }

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}
bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                 == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}
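// Note on the offset ranges handled below: ARM/Thumb2 integer loads and
// stores take an unsigned 12-bit immediate (with a separate negative imm8
// form on Thumb2), addrmode3 (halfword / signed-byte) forms take +/-255,
// and VFP addrmode5 takes an 8-bit immediate scaled by 4. Offsets outside
// the encodable range get folded into the base register instead.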
void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ?
      (const TargetRegisterClass*)&ARM::tGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
                                MachinePointerInfo::getFixedStack(FI, Offset),
                                Flags,
                                MFI.getObjectSize(FI),
                                MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}
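// Note: ARMEmitLoad below also compensates for alignment: an f32 load from a
// location that isn't word-aligned is emitted as an integer LDR followed by
// a VMOVSR into an S register, since VLDRS requires word alignment.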
bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = &ARM::GPRRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load. Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}
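// Note: in ARMEmitStore below, an i1 value is first masked down to its low
// bit with an AND before being stored as a byte, since the upper bits of
// the source register are not guaranteed to be zero.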
bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ?
        (const TargetRegisterClass*)&ARM::tGPRRegClass :
        (const TargetRegisterClass*)&ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}
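// Note: SelectBranch below handles three shapes of condition: a compare in
// the same block (fused into a conditional branch), a trunc-to-i1 feeding
// the branch (tested against bit 0 with TST), and the fallback where an i1
// value already lives in a register (also tested with TST against 1).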
bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(ARM::CPSR);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}
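// Note: VFP int-to-fp conversions (VSITOS/VUITOS and friends) operate
// fp-reg to fp-reg, so SelectIToFP below first transfers the integer bits
// into an S register with ARMMoveToFPReg (a VMOVSR) before converting.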
bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg; since the operand
  // above was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}
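// Note: SelectSelect below lowers `select` by comparing the i1 condition
// against zero (CMPri) and then using a predicated move: MOVCCr for a
// register operand, or MOVCCi/MVNCCi when the operand is an encodable
// immediate.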

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}
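
// SelectBinaryIntOp catches integer binary operators on the sub-word types
// (i1/i8/i16) that are not legal on ARM and therefore get rejected by the
// target-independent selector. The operation is simply performed in a full
// 32-bit register; e.g. an "add i8" becomes a plain ADDrr, leaving the high
// bits undefined, which is consistent with how sub-word values are modeled
// elsewhere in this file (see SelectTrunc).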

bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}
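
// Call lowering is split into two phases. ProcessCallArgs runs the calling
// convention over the argument list, bails out on anything unsupported, and
// then emits CALLSEQ_START plus the register copies and stack stores for the
// arguments. After the call instruction itself has been built, FinishCall
// emits CALLSEQ_END and copies the return value out of its physical
// register(s).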

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Check that this argument's location and type are ones we can handle.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      continue;
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64 ||
          // TODO: Only handle register args for now.
          !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    } else {
      switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
      default:
        return false;
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
        break;
      case MVT::f32:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      case MVT::f64:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      }
    }
  }

  // At this point, we are able to handle the call's arguments in fast isel.

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
        assert(Arg != 0 && "Failed to emit a sext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through.  Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
        assert(Arg != 0 && "Failed to emit a zext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");

      CCValAssign &NextVA = ArgLocs[++i];

      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }

  return true;
}
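
// FinishCall tears the call frame back down and captures the return value.
// Note the f64 special case: with a soft-float calling convention an f64
// result comes back split across two core registers (typically r0/r1) and is
// reassembled into a D-register with VMOVDRR, mirroring the VMOVRRD split
// used for f64 arguments in ProcessCallArgs above.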

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}
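
// SelectRet handles 'ret' instructions with at most one register-sized
// value. A simple "ret i32 %x" reduces to a copy into the convention's
// return register followed by the return, roughly
//   mov r0, rX
//   bx  lr
// with an i1/i8/i16 value first widened to i32 when the return is marked
// zeroext or signext. (Thumb2 functions use tBX_RET.)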

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext.  Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? ARM::tBLXr : ARM::BLX;
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
                                       GlobalValue::ExternalLinkage, 0, Name);
  return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
}
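
// Example of the libcall path: SelectDiv above funnels an i32 'sdiv' into
// ARMEmitLibcall with RTLIB::SDIV_I32. TLI.getLibcallName maps that to the
// runtime routine for the target ABI (typically __aeabi_idiv under AAPCS or
// __divsi3 otherwise), so the division simply becomes a call to that symbol.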

// A quick routine that emits a call to the named libcall, passing the
// operands of the Instruction I as the call's arguments. We can assume that
// we can emit a call for any libcall we can produce. This is an abridged
// version of the full call infrastructure since we won't need to worry about
// things like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to
// unify with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  unsigned CalleeReg = 0;
  if (EnableARMLongCalls) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));
  if (isThumb2) {
    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
    if (EnableARMLongCalls)
      MIB.addReg(CalleeReg);
    else
      MIB.addExternalSymbol(TLI.getLibcallName(Call));
  } else {
    if (EnableARMLongCalls)
      MIB.addReg(CalleeReg);
    else
      MIB.addExternalSymbol(TLI.getLibcallName(Call));

    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
  }
  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
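
// For user calls, a direct call to a known GlobalValue is emitted as a
// BL/tBL with a global-address operand. Everything else (indirect calls,
// and all calls when EnableARMLongCalls is set) first materializes the
// callee's address in a register and issues a register-indirect BLX/tBLXr.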

bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e - i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || EnableARMLongCalls) UseReg = true;

  unsigned CalleeReg = 0;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));
  if (isThumb2) {
    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
    if (UseReg)
      MIB.addReg(CalleeReg);
    else if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);
  } else {
    if (UseReg)
      MIB.addReg(CalleeReg);
    else if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);

    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
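
// Small memcpys with a constant length are inlined instead of being lowered
// to a libcall. The emitter below walks the buffer using the widest integer
// access that still fits; e.g. a 7-byte copy becomes an i32, then an i16,
// then an i8 load/store pair. The 16-byte cap keeps the expansion to at most
// four such pairs.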

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits() / 8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
    MFI->setFrameAddressIsTaken(true);

    unsigned LdrOpc;
    const TargetRegisterClass *RC;
    if (isThumb2) {
      LdrOpc = ARM::t2LDRi12;
      RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
    } else {
      LdrOpc = ARM::LDRi12;
      RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
    }

    const ARMBaseRegisterInfo *RegInfo =
      static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    unsigned SrcReg = FramePtr;

    // Recursively load frame address
    // ldr r0 [fp]
    // ldr r0 [r0]
    // ldr r0 [r0]
    // ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(LdrOpc), DestReg)
                      .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    UpdateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress.  Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::TRAP));
    return true;
  }
  }
}
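
// Truncation requires no code at all here: the bits above the narrow type
// are undefined by this file's convention, so "trunc i32 %x to i8" simply
// reuses %x's register for the result.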

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}
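
// ARMEmitIntExt emits, e.g., "uxtb rD, rS" for an i8 zext and "sxth rD, rS"
// for an i16 sext (t2UXTB/t2SXTH in Thumb2, ARMv6+ only). There is no
// dedicated i1 extend, so an i1 zext is emitted as "and rD, rS, #1", and an
// i1 sext is not handled at all (it returns 0). SelectIntExt below is a thin
// wrapper that feeds the instruction's operands to it.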

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction.  If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  // ldrb r1, [r0]       ldrb r1, [r0]
  // uxtb r2, r1     =>
  // mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
      // Intentional fall-through to the matching unsigned cases.
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
      // Intentional fall-through to the matching unsigned cases.
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

namespace llvm {
  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-iOS.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // iOS and non-Thumb1 only for now.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only())
      return new ARMFastISel(funcInfo);
    return 0;
  }
}