1 //===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines the ARM-specific support for the FastISel class. Some 11 // of the target-specific code is generated by tablegen in the file 12 // ARMGenFastISel.inc, which is #included here. 13 // 14 //===----------------------------------------------------------------------===// 15 16 #include "ARM.h" 17 #include "ARMBaseInstrInfo.h" 18 #include "ARMCallingConv.h" 19 #include "ARMConstantPoolValue.h" 20 #include "ARMSubtarget.h" 21 #include "ARMTargetMachine.h" 22 #include "MCTargetDesc/ARMAddressingModes.h" 23 #include "llvm/ADT/STLExtras.h" 24 #include "llvm/CodeGen/Analysis.h" 25 #include "llvm/CodeGen/FastISel.h" 26 #include "llvm/CodeGen/FunctionLoweringInfo.h" 27 #include "llvm/CodeGen/MachineConstantPool.h" 28 #include "llvm/CodeGen/MachineFrameInfo.h" 29 #include "llvm/CodeGen/MachineInstrBuilder.h" 30 #include "llvm/CodeGen/MachineMemOperand.h" 31 #include "llvm/CodeGen/MachineModuleInfo.h" 32 #include "llvm/CodeGen/MachineRegisterInfo.h" 33 #include "llvm/IR/CallingConv.h" 34 #include "llvm/IR/DataLayout.h" 35 #include "llvm/IR/DerivedTypes.h" 36 #include "llvm/IR/GlobalVariable.h" 37 #include "llvm/IR/Instructions.h" 38 #include "llvm/IR/IntrinsicInst.h" 39 #include "llvm/IR/Module.h" 40 #include "llvm/IR/Operator.h" 41 #include "llvm/Support/CallSite.h" 42 #include "llvm/Support/CommandLine.h" 43 #include "llvm/Support/ErrorHandling.h" 44 #include "llvm/Support/GetElementPtrTypeIterator.h" 45 #include "llvm/Target/TargetInstrInfo.h" 46 #include "llvm/Target/TargetLowering.h" 47 #include "llvm/Target/TargetMachine.h" 48 #include "llvm/Target/TargetOptions.h" 49 using namespace llvm; 50 51 extern cl::opt<bool> EnableARMLongCalls; 52 53 namespace { 54 55 // All possible address modes, plus some. 56 typedef struct Address { 57 enum { 58 RegBase, 59 FrameIndexBase 60 } BaseType; 61 62 union { 63 unsigned Reg; 64 int FI; 65 } Base; 66 67 int Offset; 68 69 // Innocuous defaults for our address. 70 Address() 71 : BaseType(RegBase), Offset(0) { 72 Base.Reg = 0; 73 } 74 } Address; 75 76 class ARMFastISel : public FastISel { 77 78 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can 79 /// make the right decision when generating code for different targets. 80 const ARMSubtarget *Subtarget; 81 const TargetMachine &TM; 82 const TargetInstrInfo &TII; 83 const TargetLowering &TLI; 84 ARMFunctionInfo *AFI; 85 86 // Convenience variables to avoid some queries. 87 bool isThumb2; 88 LLVMContext *Context; 89 90 public: 91 explicit ARMFastISel(FunctionLoweringInfo &funcInfo, 92 const TargetLibraryInfo *libInfo) 93 : FastISel(funcInfo, libInfo), 94 TM(funcInfo.MF->getTarget()), 95 TII(*TM.getInstrInfo()), 96 TLI(*TM.getTargetLowering()) { 97 Subtarget = &TM.getSubtarget<ARMSubtarget>(); 98 AFI = funcInfo.MF->getInfo<ARMFunctionInfo>(); 99 isThumb2 = AFI->isThumbFunction(); 100 Context = &funcInfo.Fn->getContext(); 101 } 102 103 // Code from FastISel.cpp. 
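  // These overrides mirror the generic FastEmitInst_* helpers from
  // FastISel.cpp; the ARM versions route every emitted instruction through
  // AddOptionalDefs so the default predicate/CC operands get appended.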
104 private: 105 unsigned FastEmitInst_(unsigned MachineInstOpcode, 106 const TargetRegisterClass *RC); 107 unsigned FastEmitInst_r(unsigned MachineInstOpcode, 108 const TargetRegisterClass *RC, 109 unsigned Op0, bool Op0IsKill); 110 unsigned FastEmitInst_rr(unsigned MachineInstOpcode, 111 const TargetRegisterClass *RC, 112 unsigned Op0, bool Op0IsKill, 113 unsigned Op1, bool Op1IsKill); 114 unsigned FastEmitInst_rrr(unsigned MachineInstOpcode, 115 const TargetRegisterClass *RC, 116 unsigned Op0, bool Op0IsKill, 117 unsigned Op1, bool Op1IsKill, 118 unsigned Op2, bool Op2IsKill); 119 unsigned FastEmitInst_ri(unsigned MachineInstOpcode, 120 const TargetRegisterClass *RC, 121 unsigned Op0, bool Op0IsKill, 122 uint64_t Imm); 123 unsigned FastEmitInst_rf(unsigned MachineInstOpcode, 124 const TargetRegisterClass *RC, 125 unsigned Op0, bool Op0IsKill, 126 const ConstantFP *FPImm); 127 unsigned FastEmitInst_rri(unsigned MachineInstOpcode, 128 const TargetRegisterClass *RC, 129 unsigned Op0, bool Op0IsKill, 130 unsigned Op1, bool Op1IsKill, 131 uint64_t Imm); 132 unsigned FastEmitInst_i(unsigned MachineInstOpcode, 133 const TargetRegisterClass *RC, 134 uint64_t Imm); 135 unsigned FastEmitInst_ii(unsigned MachineInstOpcode, 136 const TargetRegisterClass *RC, 137 uint64_t Imm1, uint64_t Imm2); 138 139 unsigned FastEmitInst_extractsubreg(MVT RetVT, 140 unsigned Op0, bool Op0IsKill, 141 uint32_t Idx); 142 143 // Backend specific FastISel code. 144 private: 145 virtual bool TargetSelectInstruction(const Instruction *I); 146 virtual unsigned TargetMaterializeConstant(const Constant *C); 147 virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI); 148 virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, 149 const LoadInst *LI); 150 virtual bool FastLowerArguments(); 151 private: 152 #include "ARMGenFastISel.inc" 153 154 // Instruction selection routines. 155 private: 156 bool SelectLoad(const Instruction *I); 157 bool SelectStore(const Instruction *I); 158 bool SelectBranch(const Instruction *I); 159 bool SelectIndirectBr(const Instruction *I); 160 bool SelectCmp(const Instruction *I); 161 bool SelectFPExt(const Instruction *I); 162 bool SelectFPTrunc(const Instruction *I); 163 bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode); 164 bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode); 165 bool SelectIToFP(const Instruction *I, bool isSigned); 166 bool SelectFPToI(const Instruction *I, bool isSigned); 167 bool SelectDiv(const Instruction *I, bool isSigned); 168 bool SelectRem(const Instruction *I, bool isSigned); 169 bool SelectCall(const Instruction *I, const char *IntrMemName); 170 bool SelectIntrinsicCall(const IntrinsicInst &I); 171 bool SelectSelect(const Instruction *I); 172 bool SelectRet(const Instruction *I); 173 bool SelectTrunc(const Instruction *I); 174 bool SelectIntExt(const Instruction *I); 175 bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy); 176 177 // Utility routines. 
 private:
  unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                    unsigned OpNum);
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isLoadTypeLegal(Type *Ty, MVT &VT);
  bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                  bool isZExt);
  bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                   unsigned Alignment = 0, bool isZExt = true,
                   bool allocReg = true);
  bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                    unsigned Alignment = 0);
  bool ARMComputeAddress(const Value *Obj, Address &Addr);
  void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
  bool ARMIsMemCpySmall(uint64_t Len);
  bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                             unsigned Alignment);
  unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
  unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
  unsigned ARMMaterializeInt(const Constant *C, MVT VT);
  unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
  unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
  unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
  unsigned ARMSelectCallOp(bool UseReg);
  unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);

  // Call handling routines.
 private:
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                bool Return,
                                bool isVarArg);
  bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                       SmallVectorImpl<unsigned> &ArgRegs,
                       SmallVectorImpl<MVT> &ArgVTs,
                       SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                       SmallVectorImpl<unsigned> &RegArgs,
                       CallingConv::ID CC,
                       unsigned &NumBytes,
                       bool isVarArg);
  unsigned getLibcallReg(const Twine &Name);
  bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                  const Instruction *I, CallingConv::ID CC,
                  unsigned &NumBytes, bool isVarArg);
  bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

  // OptionalDef handling routines.
 private:
  bool isARMNEONPred(const MachineInstr *MI);
  bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
  const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
  void AddLoadStoreOperands(MVT VT, Address &Addr,
                            const MachineInstrBuilder &MIB,
                            unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 function or not a NEON instruction we'll be handled
  // via isPredicable.
258 if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON || 259 AFI->isThumb2Function()) 260 return MI->isPredicable(); 261 262 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) 263 if (MCID.OpInfo[i].isPredicate()) 264 return true; 265 266 return false; 267 } 268 269 // If the machine is predicable go ahead and add the predicate operands, if 270 // it needs default CC operands add those. 271 // TODO: If we want to support thumb1 then we'll need to deal with optional 272 // CPSR defs that need to be added before the remaining operands. See s_cc_out 273 // for descriptions why. 274 const MachineInstrBuilder & 275 ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) { 276 MachineInstr *MI = &*MIB; 277 278 // Do we use a predicate? or... 279 // Are we NEON in ARM mode and have a predicate operand? If so, I know 280 // we're not predicable but add it anyways. 281 if (isARMNEONPred(MI)) 282 AddDefaultPred(MIB); 283 284 // Do we optionally set a predicate? Preds is size > 0 iff the predicate 285 // defines CPSR. All other OptionalDefines in ARM are the CCR register. 286 bool CPSR = false; 287 if (DefinesOptionalPredicate(MI, &CPSR)) { 288 if (CPSR) 289 AddDefaultT1CC(MIB); 290 else 291 AddDefaultCC(MIB); 292 } 293 return MIB; 294 } 295 296 unsigned ARMFastISel::constrainOperandRegClass(const MCInstrDesc &II, 297 unsigned Op, unsigned OpNum) { 298 if (TargetRegisterInfo::isVirtualRegister(Op)) { 299 const TargetRegisterClass *RegClass = 300 TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF); 301 if (!MRI.constrainRegClass(Op, RegClass)) { 302 // If it's not legal to COPY between the register classes, something 303 // has gone very wrong before we got here. 304 unsigned NewOp = createResultReg(RegClass); 305 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 306 TII.get(TargetOpcode::COPY), NewOp).addReg(Op)); 307 return NewOp; 308 } 309 } 310 return Op; 311 } 312 313 unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode, 314 const TargetRegisterClass* RC) { 315 unsigned ResultReg = createResultReg(RC); 316 const MCInstrDesc &II = TII.get(MachineInstOpcode); 317 318 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)); 319 return ResultReg; 320 } 321 322 unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode, 323 const TargetRegisterClass *RC, 324 unsigned Op0, bool Op0IsKill) { 325 unsigned ResultReg = createResultReg(RC); 326 const MCInstrDesc &II = TII.get(MachineInstOpcode); 327 328 // Make sure the input operand is sufficiently constrained to be legal 329 // for this instruction. 330 Op0 = constrainOperandRegClass(II, Op0, 1); 331 if (II.getNumDefs() >= 1) { 332 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 333 .addReg(Op0, Op0IsKill * RegState::Kill)); 334 } else { 335 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 336 .addReg(Op0, Op0IsKill * RegState::Kill)); 337 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 338 TII.get(TargetOpcode::COPY), ResultReg) 339 .addReg(II.ImplicitDefs[0])); 340 } 341 return ResultReg; 342 } 343 344 unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode, 345 const TargetRegisterClass *RC, 346 unsigned Op0, bool Op0IsKill, 347 unsigned Op1, bool Op1IsKill) { 348 unsigned ResultReg = createResultReg(RC); 349 const MCInstrDesc &II = TII.get(MachineInstOpcode); 350 351 // Make sure the input operands are sufficiently constrained to be legal 352 // for this instruction. 
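  // (constrainOperandRegClass, defined above, may emit a COPY into a fresh
  // virtual register when the incoming value's class is incompatible with
  // the instruction's operand.)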
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);
  Op2 = constrainOperandRegClass(II, Op2, 3);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
436 Op0 = constrainOperandRegClass(II, Op0, 1); 437 if (II.getNumDefs() >= 1) { 438 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 439 .addReg(Op0, Op0IsKill * RegState::Kill) 440 .addFPImm(FPImm)); 441 } else { 442 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 443 .addReg(Op0, Op0IsKill * RegState::Kill) 444 .addFPImm(FPImm)); 445 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 446 TII.get(TargetOpcode::COPY), ResultReg) 447 .addReg(II.ImplicitDefs[0])); 448 } 449 return ResultReg; 450 } 451 452 unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode, 453 const TargetRegisterClass *RC, 454 unsigned Op0, bool Op0IsKill, 455 unsigned Op1, bool Op1IsKill, 456 uint64_t Imm) { 457 unsigned ResultReg = createResultReg(RC); 458 const MCInstrDesc &II = TII.get(MachineInstOpcode); 459 460 // Make sure the input operands are sufficiently constrained to be legal 461 // for this instruction. 462 Op0 = constrainOperandRegClass(II, Op0, 1); 463 Op1 = constrainOperandRegClass(II, Op1, 2); 464 if (II.getNumDefs() >= 1) { 465 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 466 .addReg(Op0, Op0IsKill * RegState::Kill) 467 .addReg(Op1, Op1IsKill * RegState::Kill) 468 .addImm(Imm)); 469 } else { 470 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 471 .addReg(Op0, Op0IsKill * RegState::Kill) 472 .addReg(Op1, Op1IsKill * RegState::Kill) 473 .addImm(Imm)); 474 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 475 TII.get(TargetOpcode::COPY), ResultReg) 476 .addReg(II.ImplicitDefs[0])); 477 } 478 return ResultReg; 479 } 480 481 unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode, 482 const TargetRegisterClass *RC, 483 uint64_t Imm) { 484 unsigned ResultReg = createResultReg(RC); 485 const MCInstrDesc &II = TII.get(MachineInstOpcode); 486 487 if (II.getNumDefs() >= 1) { 488 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 489 .addImm(Imm)); 490 } else { 491 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 492 .addImm(Imm)); 493 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 494 TII.get(TargetOpcode::COPY), ResultReg) 495 .addReg(II.ImplicitDefs[0])); 496 } 497 return ResultReg; 498 } 499 500 unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode, 501 const TargetRegisterClass *RC, 502 uint64_t Imm1, uint64_t Imm2) { 503 unsigned ResultReg = createResultReg(RC); 504 const MCInstrDesc &II = TII.get(MachineInstOpcode); 505 506 if (II.getNumDefs() >= 1) { 507 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 508 .addImm(Imm1).addImm(Imm2)); 509 } else { 510 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 511 .addImm(Imm1).addImm(Imm2)); 512 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 513 TII.get(TargetOpcode::COPY), 514 ResultReg) 515 .addReg(II.ImplicitDefs[0])); 516 } 517 return ResultReg; 518 } 519 520 unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT, 521 unsigned Op0, bool Op0IsKill, 522 uint32_t Idx) { 523 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); 524 assert(TargetRegisterInfo::isVirtualRegister(Op0) && 525 "Cannot yet extract from physregs"); 526 527 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 528 DL, TII.get(TargetOpcode::COPY), ResultReg) 529 .addReg(Op0, getKillRegState(Op0IsKill), Idx)); 530 return ResultReg; 531 } 532 533 // TODO: Don't worry about 64-bit now, but when this is fixed 
remove the 534 // checks from the various callers. 535 unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) { 536 if (VT == MVT::f64) return 0; 537 538 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 539 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 540 TII.get(ARM::VMOVSR), MoveReg) 541 .addReg(SrcReg)); 542 return MoveReg; 543 } 544 545 unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) { 546 if (VT == MVT::i64) return 0; 547 548 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 549 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 550 TII.get(ARM::VMOVRS), MoveReg) 551 .addReg(SrcReg)); 552 return MoveReg; 553 } 554 555 // For double width floating point we need to materialize two constants 556 // (the high and the low) into integer registers then use a move to get 557 // the combined constant into an FP reg. 558 unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) { 559 const APFloat Val = CFP->getValueAPF(); 560 bool is64bit = VT == MVT::f64; 561 562 // This checks to see if we can use VFP3 instructions to materialize 563 // a constant, otherwise we have to go through the constant pool. 564 if (TLI.isFPImmLegal(Val, VT)) { 565 int Imm; 566 unsigned Opc; 567 if (is64bit) { 568 Imm = ARM_AM::getFP64Imm(Val); 569 Opc = ARM::FCONSTD; 570 } else { 571 Imm = ARM_AM::getFP32Imm(Val); 572 Opc = ARM::FCONSTS; 573 } 574 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 575 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 576 DestReg) 577 .addImm(Imm)); 578 return DestReg; 579 } 580 581 // Require VFP2 for loading fp constants. 582 if (!Subtarget->hasVFP2()) return false; 583 584 // MachineConstantPool wants an explicit alignment. 585 unsigned Align = TD.getPrefTypeAlignment(CFP->getType()); 586 if (Align == 0) { 587 // TODO: Figure out if this is correct. 588 Align = TD.getTypeAllocSize(CFP->getType()); 589 } 590 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align); 591 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 592 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS; 593 594 // The extra reg is for addrmode5. 595 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 596 DestReg) 597 .addConstantPoolIndex(Idx) 598 .addReg(0)); 599 return DestReg; 600 } 601 602 unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) { 603 604 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1) 605 return false; 606 607 // If we can do this in a single instruction without a constant pool entry 608 // do so now. 609 const ConstantInt *CI = cast<ConstantInt>(C); 610 if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) { 611 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16; 612 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass : 613 &ARM::GPRRegClass; 614 unsigned ImmReg = createResultReg(RC); 615 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 616 TII.get(Opc), ImmReg) 617 .addImm(CI->getZExtValue())); 618 return ImmReg; 619 } 620 621 // Use MVN to emit negative constants. 622 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) { 623 unsigned Imm = (unsigned)~(CI->getSExtValue()); 624 bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 625 (ARM_AM::getSOImmVal(Imm) != -1); 626 if (UseImm) { 627 unsigned Opc = isThumb2 ? 
ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else {
    // The extra immediate is for addrmode2.
    DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));
  }

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();
  bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);

  // FastISel TLS support on non-Darwin is broken, punt to SelectionDAG.
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  if (!Subtarget->isTargetDarwin() && IsThreadLocal) return 0;

  // Use movw+movt when possible, it avoids constant pool entries.
  // Non-darwin targets only support static movt relocations in FastISel.
  if (Subtarget->useMovt() &&
      (Subtarget->isTargetDarwin() || RelocM == Reloc::Static)) {
    unsigned Opc;
    unsigned char TF = 0;
    if (Subtarget->isTargetDarwin())
      TF = ARMII::MO_NONLAZY;

    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV, 0, TF));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM != Reloc::PIC_) ?
ARM::t2LDRpci : ARM::t2LDRpci_pic; 724 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg) 725 .addConstantPoolIndex(Idx); 726 if (RelocM == Reloc::PIC_) 727 MIB.addImm(Id); 728 AddOptionalDefs(MIB); 729 } else { 730 // The extra immediate is for addrmode2. 731 DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0); 732 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp), 733 DestReg) 734 .addConstantPoolIndex(Idx) 735 .addImm(0); 736 AddOptionalDefs(MIB); 737 738 if (RelocM == Reloc::PIC_) { 739 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD; 740 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 741 742 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 743 DL, TII.get(Opc), NewDestReg) 744 .addReg(DestReg) 745 .addImm(Id); 746 AddOptionalDefs(MIB); 747 return NewDestReg; 748 } 749 } 750 } 751 752 if (IsIndirect) { 753 MachineInstrBuilder MIB; 754 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 755 if (isThumb2) 756 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 757 TII.get(ARM::t2LDRi12), NewDestReg) 758 .addReg(DestReg) 759 .addImm(0); 760 else 761 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12), 762 NewDestReg) 763 .addReg(DestReg) 764 .addImm(0); 765 DestReg = NewDestReg; 766 AddOptionalDefs(MIB); 767 } 768 769 return DestReg; 770 } 771 772 unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) { 773 EVT CEVT = TLI.getValueType(C->getType(), true); 774 775 // Only handle simple types. 776 if (!CEVT.isSimple()) return 0; 777 MVT VT = CEVT.getSimpleVT(); 778 779 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) 780 return ARMMaterializeFP(CFP, VT); 781 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) 782 return ARMMaterializeGV(GV, VT); 783 else if (isa<ConstantInt>(C)) 784 return ARMMaterializeInt(C, VT); 785 786 return 0; 787 } 788 789 // TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF); 790 791 unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { 792 // Don't handle dynamic allocas. 793 if (!FuncInfo.StaticAllocaMap.count(AI)) return 0; 794 795 MVT VT; 796 if (!isLoadTypeLegal(AI->getType(), VT)) return 0; 797 798 DenseMap<const AllocaInst*, int>::iterator SI = 799 FuncInfo.StaticAllocaMap.find(AI); 800 801 // This will get lowered later into the correct offsets and registers 802 // via rewriteXFrameIndex. 803 if (SI != FuncInfo.StaticAllocaMap.end()) { 804 const TargetRegisterClass* RC = TLI.getRegClassFor(VT); 805 unsigned ResultReg = createResultReg(RC); 806 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; 807 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 808 TII.get(Opc), ResultReg) 809 .addFrameIndex(SI->second) 810 .addImm(0)); 811 return ResultReg; 812 } 813 814 return 0; 815 } 816 817 bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) { 818 EVT evt = TLI.getValueType(Ty, true); 819 820 // Only handle simple types. 821 if (evt == MVT::Other || !evt.isSimple()) return false; 822 VT = evt.getSimpleVT(); 823 824 // Handle all legal types, i.e. a register that will directly hold this 825 // value. 826 return TLI.isTypeLegal(VT); 827 } 828 829 bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) { 830 if (isTypeLegal(Ty, VT)) return true; 831 832 // If this is a type than can be sign or zero-extended to a basic operation 833 // go ahead and accept it now. 
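  // Sub-word values can be loaded with the byte/halfword load instructions
  // and widened to i32 afterwards, so accept them here as well.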
834 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) 835 return true; 836 837 return false; 838 } 839 840 // Computes the address to get to an object. 841 bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { 842 // Some boilerplate from the X86 FastISel. 843 const User *U = NULL; 844 unsigned Opcode = Instruction::UserOp1; 845 if (const Instruction *I = dyn_cast<Instruction>(Obj)) { 846 // Don't walk into other basic blocks unless the object is an alloca from 847 // another block, otherwise it may not have a virtual register assigned. 848 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) || 849 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) { 850 Opcode = I->getOpcode(); 851 U = I; 852 } 853 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) { 854 Opcode = C->getOpcode(); 855 U = C; 856 } 857 858 if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType())) 859 if (Ty->getAddressSpace() > 255) 860 // Fast instruction selection doesn't support the special 861 // address spaces. 862 return false; 863 864 switch (Opcode) { 865 default: 866 break; 867 case Instruction::BitCast: 868 // Look through bitcasts. 869 return ARMComputeAddress(U->getOperand(0), Addr); 870 case Instruction::IntToPtr: 871 // Look past no-op inttoptrs. 872 if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy()) 873 return ARMComputeAddress(U->getOperand(0), Addr); 874 break; 875 case Instruction::PtrToInt: 876 // Look past no-op ptrtoints. 877 if (TLI.getValueType(U->getType()) == TLI.getPointerTy()) 878 return ARMComputeAddress(U->getOperand(0), Addr); 879 break; 880 case Instruction::GetElementPtr: { 881 Address SavedAddr = Addr; 882 int TmpOffset = Addr.Offset; 883 884 // Iterate through the GEP folding the constants into offsets where 885 // we can. 886 gep_type_iterator GTI = gep_type_begin(U); 887 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); 888 i != e; ++i, ++GTI) { 889 const Value *Op = *i; 890 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 891 const StructLayout *SL = TD.getStructLayout(STy); 892 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue(); 893 TmpOffset += SL->getElementOffset(Idx); 894 } else { 895 uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType()); 896 for (;;) { 897 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { 898 // Constant-offset addressing. 899 TmpOffset += CI->getSExtValue() * S; 900 break; 901 } 902 if (canFoldAddIntoGEP(U, Op)) { 903 // A compatible add with a constant operand. Fold the constant. 904 ConstantInt *CI = 905 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1)); 906 TmpOffset += CI->getSExtValue() * S; 907 // Iterate on the other operand. 908 Op = cast<AddOperator>(Op)->getOperand(0); 909 continue; 910 } 911 // Unsupported 912 goto unsupported_gep; 913 } 914 } 915 } 916 917 // Try to grab the base operand now. 918 Addr.Offset = TmpOffset; 919 if (ARMComputeAddress(U->getOperand(0), Addr)) return true; 920 921 // We failed, restore everything and try the other options. 922 Addr = SavedAddr; 923 924 unsupported_gep: 925 break; 926 } 927 case Instruction::Alloca: { 928 const AllocaInst *AI = cast<AllocaInst>(Obj); 929 DenseMap<const AllocaInst*, int>::iterator SI = 930 FuncInfo.StaticAllocaMap.find(AI); 931 if (SI != FuncInfo.StaticAllocaMap.end()) { 932 Addr.BaseType = Address::FrameIndexBase; 933 Addr.Base.FI = SI->second; 934 return true; 935 } 936 break; 937 } 938 } 939 940 // Try to get this in a register if nothing else has worked. 
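  // Fall back to materializing the pointer value itself into a base register.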
941 if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj); 942 return Addr.Base.Reg != 0; 943 } 944 945 void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) { 946 bool needsLowering = false; 947 switch (VT.SimpleTy) { 948 default: llvm_unreachable("Unhandled load/store type!"); 949 case MVT::i1: 950 case MVT::i8: 951 case MVT::i16: 952 case MVT::i32: 953 if (!useAM3) { 954 // Integer loads/stores handle 12-bit offsets. 955 needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset); 956 // Handle negative offsets. 957 if (needsLowering && isThumb2) 958 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 && 959 Addr.Offset > -256); 960 } else { 961 // ARM halfword load/stores and signed byte loads use +/-imm8 offsets. 962 needsLowering = (Addr.Offset > 255 || Addr.Offset < -255); 963 } 964 break; 965 case MVT::f32: 966 case MVT::f64: 967 // Floating point operands handle 8-bit offsets. 968 needsLowering = ((Addr.Offset & 0xff) != Addr.Offset); 969 break; 970 } 971 972 // If this is a stack pointer and the offset needs to be simplified then 973 // put the alloca address into a register, set the base type back to 974 // register and continue. This should almost never happen. 975 if (needsLowering && Addr.BaseType == Address::FrameIndexBase) { 976 const TargetRegisterClass *RC = isThumb2 ? 977 (const TargetRegisterClass*)&ARM::tGPRRegClass : 978 (const TargetRegisterClass*)&ARM::GPRRegClass; 979 unsigned ResultReg = createResultReg(RC); 980 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; 981 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 982 TII.get(Opc), ResultReg) 983 .addFrameIndex(Addr.Base.FI) 984 .addImm(0)); 985 Addr.Base.Reg = ResultReg; 986 Addr.BaseType = Address::RegBase; 987 } 988 989 // Since the offset is too large for the load/store instruction 990 // get the reg+offset into a register. 991 if (needsLowering) { 992 Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg, 993 /*Op0IsKill*/false, Addr.Offset, MVT::i32); 994 Addr.Offset = 0; 995 } 996 } 997 998 void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr, 999 const MachineInstrBuilder &MIB, 1000 unsigned Flags, bool useAM3) { 1001 // addrmode5 output depends on the selection dag addressing dividing the 1002 // offset by 4 that it then later multiplies. Do this here as well. 1003 if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64) 1004 Addr.Offset /= 4; 1005 1006 // Frame base works a bit differently. Handle it separately. 1007 if (Addr.BaseType == Address::FrameIndexBase) { 1008 int FI = Addr.Base.FI; 1009 int Offset = Addr.Offset; 1010 MachineMemOperand *MMO = 1011 FuncInfo.MF->getMachineMemOperand( 1012 MachinePointerInfo::getFixedStack(FI, Offset), 1013 Flags, 1014 MFI.getObjectSize(FI), 1015 MFI.getObjectAlignment(FI)); 1016 // Now add the rest of the operands. 1017 MIB.addFrameIndex(FI); 1018 1019 // ARM halfword load/stores and signed byte loads need an additional 1020 // operand. 1021 if (useAM3) { 1022 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 1023 MIB.addReg(0); 1024 MIB.addImm(Imm); 1025 } else { 1026 MIB.addImm(Addr.Offset); 1027 } 1028 MIB.addMemOperand(MMO); 1029 } else { 1030 // Now add the rest of the operands. 1031 MIB.addReg(Addr.Base.Reg); 1032 1033 // ARM halfword load/stores and signed byte loads need an additional 1034 // operand. 1035 if (useAM3) { 1036 signed Imm = (Addr.Offset < 0) ? 
(0x100 | -Addr.Offset) : Addr.Offset; 1037 MIB.addReg(0); 1038 MIB.addImm(Imm); 1039 } else { 1040 MIB.addImm(Addr.Offset); 1041 } 1042 } 1043 AddOptionalDefs(MIB); 1044 } 1045 1046 bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, 1047 unsigned Alignment, bool isZExt, bool allocReg) { 1048 unsigned Opc; 1049 bool useAM3 = false; 1050 bool needVMOV = false; 1051 const TargetRegisterClass *RC; 1052 switch (VT.SimpleTy) { 1053 // This is mostly going to be Neon/vector support. 1054 default: return false; 1055 case MVT::i1: 1056 case MVT::i8: 1057 if (isThumb2) { 1058 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1059 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8; 1060 else 1061 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12; 1062 } else { 1063 if (isZExt) { 1064 Opc = ARM::LDRBi12; 1065 } else { 1066 Opc = ARM::LDRSB; 1067 useAM3 = true; 1068 } 1069 } 1070 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; 1071 break; 1072 case MVT::i16: 1073 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) 1074 return false; 1075 1076 if (isThumb2) { 1077 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1078 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8; 1079 else 1080 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12; 1081 } else { 1082 Opc = isZExt ? ARM::LDRH : ARM::LDRSH; 1083 useAM3 = true; 1084 } 1085 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; 1086 break; 1087 case MVT::i32: 1088 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem()) 1089 return false; 1090 1091 if (isThumb2) { 1092 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1093 Opc = ARM::t2LDRi8; 1094 else 1095 Opc = ARM::t2LDRi12; 1096 } else { 1097 Opc = ARM::LDRi12; 1098 } 1099 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; 1100 break; 1101 case MVT::f32: 1102 if (!Subtarget->hasVFP2()) return false; 1103 // Unaligned loads need special handling. Floats require word-alignment. 1104 if (Alignment && Alignment < 4) { 1105 needVMOV = true; 1106 VT = MVT::i32; 1107 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12; 1108 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; 1109 } else { 1110 Opc = ARM::VLDRS; 1111 RC = TLI.getRegClassFor(VT); 1112 } 1113 break; 1114 case MVT::f64: 1115 if (!Subtarget->hasVFP2()) return false; 1116 // FIXME: Unaligned loads need special handling. Doublewords require 1117 // word-alignment. 1118 if (Alignment && Alignment < 4) 1119 return false; 1120 1121 Opc = ARM::VLDRD; 1122 RC = TLI.getRegClassFor(VT); 1123 break; 1124 } 1125 // Simplify this down to something we can handle. 1126 ARMSimplifyAddress(Addr, VT, useAM3); 1127 1128 // Create the base instruction, then add the operands. 1129 if (allocReg) 1130 ResultReg = createResultReg(RC); 1131 assert (ResultReg > 255 && "Expected an allocated virtual register."); 1132 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1133 TII.get(Opc), ResultReg); 1134 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3); 1135 1136 // If we had an unaligned load of a float we've converted it to an regular 1137 // load. Now we must move from the GRP to the FP register. 
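  // VMOVSR copies the raw 32-bit pattern from the core register into an S
  // register without any conversion.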
1138 if (needVMOV) { 1139 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1140 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1141 TII.get(ARM::VMOVSR), MoveReg) 1142 .addReg(ResultReg)); 1143 ResultReg = MoveReg; 1144 } 1145 return true; 1146 } 1147 1148 bool ARMFastISel::SelectLoad(const Instruction *I) { 1149 // Atomic loads need special handling. 1150 if (cast<LoadInst>(I)->isAtomic()) 1151 return false; 1152 1153 // Verify we have a legal type before going any further. 1154 MVT VT; 1155 if (!isLoadTypeLegal(I->getType(), VT)) 1156 return false; 1157 1158 // See if we can handle this address. 1159 Address Addr; 1160 if (!ARMComputeAddress(I->getOperand(0), Addr)) return false; 1161 1162 unsigned ResultReg; 1163 if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment())) 1164 return false; 1165 UpdateValueMap(I, ResultReg); 1166 return true; 1167 } 1168 1169 bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr, 1170 unsigned Alignment) { 1171 unsigned StrOpc; 1172 bool useAM3 = false; 1173 switch (VT.SimpleTy) { 1174 // This is mostly going to be Neon/vector support. 1175 default: return false; 1176 case MVT::i1: { 1177 unsigned Res = createResultReg(isThumb2 ? 1178 (const TargetRegisterClass*)&ARM::tGPRRegClass : 1179 (const TargetRegisterClass*)&ARM::GPRRegClass); 1180 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri; 1181 SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1); 1182 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1183 TII.get(Opc), Res) 1184 .addReg(SrcReg).addImm(1)); 1185 SrcReg = Res; 1186 } // Fallthrough here. 1187 case MVT::i8: 1188 if (isThumb2) { 1189 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1190 StrOpc = ARM::t2STRBi8; 1191 else 1192 StrOpc = ARM::t2STRBi12; 1193 } else { 1194 StrOpc = ARM::STRBi12; 1195 } 1196 break; 1197 case MVT::i16: 1198 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) 1199 return false; 1200 1201 if (isThumb2) { 1202 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1203 StrOpc = ARM::t2STRHi8; 1204 else 1205 StrOpc = ARM::t2STRHi12; 1206 } else { 1207 StrOpc = ARM::STRH; 1208 useAM3 = true; 1209 } 1210 break; 1211 case MVT::i32: 1212 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem()) 1213 return false; 1214 1215 if (isThumb2) { 1216 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1217 StrOpc = ARM::t2STRi8; 1218 else 1219 StrOpc = ARM::t2STRi12; 1220 } else { 1221 StrOpc = ARM::STRi12; 1222 } 1223 break; 1224 case MVT::f32: 1225 if (!Subtarget->hasVFP2()) return false; 1226 // Unaligned stores need special handling. Floats require word-alignment. 1227 if (Alignment && Alignment < 4) { 1228 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 1229 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1230 TII.get(ARM::VMOVRS), MoveReg) 1231 .addReg(SrcReg)); 1232 SrcReg = MoveReg; 1233 VT = MVT::i32; 1234 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12; 1235 } else { 1236 StrOpc = ARM::VSTRS; 1237 } 1238 break; 1239 case MVT::f64: 1240 if (!Subtarget->hasVFP2()) return false; 1241 // FIXME: Unaligned stores need special handling. Doublewords require 1242 // word-alignment. 1243 if (Alignment && Alignment < 4) 1244 return false; 1245 1246 StrOpc = ARM::VSTRD; 1247 break; 1248 } 1249 // Simplify this down to something we can handle. 
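  // ARMSimplifyAddress rewrites the address so the remaining offset fits the
  // immediate field of the opcode chosen above, moving base+offset into a
  // register when it does not.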
1250 ARMSimplifyAddress(Addr, VT, useAM3); 1251 1252 // Create the base instruction, then add the operands. 1253 SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0); 1254 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1255 TII.get(StrOpc)) 1256 .addReg(SrcReg); 1257 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3); 1258 return true; 1259 } 1260 1261 bool ARMFastISel::SelectStore(const Instruction *I) { 1262 Value *Op0 = I->getOperand(0); 1263 unsigned SrcReg = 0; 1264 1265 // Atomic stores need special handling. 1266 if (cast<StoreInst>(I)->isAtomic()) 1267 return false; 1268 1269 // Verify we have a legal type before going any further. 1270 MVT VT; 1271 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT)) 1272 return false; 1273 1274 // Get the value to be stored into a register. 1275 SrcReg = getRegForValue(Op0); 1276 if (SrcReg == 0) return false; 1277 1278 // See if we can handle this address. 1279 Address Addr; 1280 if (!ARMComputeAddress(I->getOperand(1), Addr)) 1281 return false; 1282 1283 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment())) 1284 return false; 1285 return true; 1286 } 1287 1288 static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) { 1289 switch (Pred) { 1290 // Needs two compares... 1291 case CmpInst::FCMP_ONE: 1292 case CmpInst::FCMP_UEQ: 1293 default: 1294 // AL is our "false" for now. The other two need more compares. 1295 return ARMCC::AL; 1296 case CmpInst::ICMP_EQ: 1297 case CmpInst::FCMP_OEQ: 1298 return ARMCC::EQ; 1299 case CmpInst::ICMP_SGT: 1300 case CmpInst::FCMP_OGT: 1301 return ARMCC::GT; 1302 case CmpInst::ICMP_SGE: 1303 case CmpInst::FCMP_OGE: 1304 return ARMCC::GE; 1305 case CmpInst::ICMP_UGT: 1306 case CmpInst::FCMP_UGT: 1307 return ARMCC::HI; 1308 case CmpInst::FCMP_OLT: 1309 return ARMCC::MI; 1310 case CmpInst::ICMP_ULE: 1311 case CmpInst::FCMP_OLE: 1312 return ARMCC::LS; 1313 case CmpInst::FCMP_ORD: 1314 return ARMCC::VC; 1315 case CmpInst::FCMP_UNO: 1316 return ARMCC::VS; 1317 case CmpInst::FCMP_UGE: 1318 return ARMCC::PL; 1319 case CmpInst::ICMP_SLT: 1320 case CmpInst::FCMP_ULT: 1321 return ARMCC::LT; 1322 case CmpInst::ICMP_SLE: 1323 case CmpInst::FCMP_ULE: 1324 return ARMCC::LE; 1325 case CmpInst::FCMP_UNE: 1326 case CmpInst::ICMP_NE: 1327 return ARMCC::NE; 1328 case CmpInst::ICMP_UGE: 1329 return ARMCC::HS; 1330 case CmpInst::ICMP_ULT: 1331 return ARMCC::LO; 1332 } 1333 } 1334 1335 bool ARMFastISel::SelectBranch(const Instruction *I) { 1336 const BranchInst *BI = cast<BranchInst>(I); 1337 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; 1338 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; 1339 1340 // Simple branch support. 1341 1342 // If we can, avoid recomputing the compare - redoing it could lead to wonky 1343 // behavior. 1344 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) { 1345 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) { 1346 1347 // Get the compare predicate. 1348 // Try to take advantage of fallthrough opportunities. 1349 CmpInst::Predicate Predicate = CI->getPredicate(); 1350 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1351 std::swap(TBB, FBB); 1352 Predicate = CmpInst::getInversePredicate(Predicate); 1353 } 1354 1355 ARMCC::CondCodes ARMPred = getComparePred(Predicate); 1356 1357 // We may not handle every CC for now. 1358 if (ARMPred == ARMCC::AL) return false; 1359 1360 // Emit the compare. 
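      // ARMEmitCmp leaves the result in CPSR (emitting FMSTAT to copy the
      // FP status flags for floating-point compares), so the conditional
      // branch below can test CPSR directly.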
1361 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1362 return false; 1363 1364 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1365 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1366 .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR); 1367 FastEmitBranch(FBB, DL); 1368 FuncInfo.MBB->addSuccessor(TBB); 1369 return true; 1370 } 1371 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) { 1372 MVT SourceVT; 1373 if (TI->hasOneUse() && TI->getParent() == I->getParent() && 1374 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) { 1375 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri; 1376 unsigned OpReg = getRegForValue(TI->getOperand(0)); 1377 OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0); 1378 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1379 TII.get(TstOpc)) 1380 .addReg(OpReg).addImm(1)); 1381 1382 unsigned CCMode = ARMCC::NE; 1383 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1384 std::swap(TBB, FBB); 1385 CCMode = ARMCC::EQ; 1386 } 1387 1388 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1389 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1390 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); 1391 1392 FastEmitBranch(FBB, DL); 1393 FuncInfo.MBB->addSuccessor(TBB); 1394 return true; 1395 } 1396 } else if (const ConstantInt *CI = 1397 dyn_cast<ConstantInt>(BI->getCondition())) { 1398 uint64_t Imm = CI->getZExtValue(); 1399 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB; 1400 FastEmitBranch(Target, DL); 1401 return true; 1402 } 1403 1404 unsigned CmpReg = getRegForValue(BI->getCondition()); 1405 if (CmpReg == 0) return false; 1406 1407 // We've been divorced from our compare! Our block was split, and 1408 // now our compare lives in a predecessor block. We musn't 1409 // re-compare here, as the children of the compare aren't guaranteed 1410 // live across the block boundary (we *could* check for this). 1411 // Regardless, the compare has been done in the predecessor block, 1412 // and it left a value for us in a virtual register. Ergo, we test 1413 // the one-bit value left in the virtual register. 1414 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri; 1415 CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0); 1416 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc)) 1417 .addReg(CmpReg).addImm(1)); 1418 1419 unsigned CCMode = ARMCC::NE; 1420 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1421 std::swap(TBB, FBB); 1422 CCMode = ARMCC::EQ; 1423 } 1424 1425 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1426 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1427 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); 1428 FastEmitBranch(FBB, DL); 1429 FuncInfo.MBB->addSuccessor(TBB); 1430 return true; 1431 } 1432 1433 bool ARMFastISel::SelectIndirectBr(const Instruction *I) { 1434 unsigned AddrReg = getRegForValue(I->getOperand(0)); 1435 if (AddrReg == 0) return false; 1436 1437 unsigned Opc = isThumb2 ? 
ARM::tBRIND : ARM::BX; 1438 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc)) 1439 .addReg(AddrReg)); 1440 1441 const IndirectBrInst *IB = cast<IndirectBrInst>(I); 1442 for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i) 1443 FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]); 1444 1445 return true; 1446 } 1447 1448 bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, 1449 bool isZExt) { 1450 Type *Ty = Src1Value->getType(); 1451 EVT SrcEVT = TLI.getValueType(Ty, true); 1452 if (!SrcEVT.isSimple()) return false; 1453 MVT SrcVT = SrcEVT.getSimpleVT(); 1454 1455 bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy()); 1456 if (isFloat && !Subtarget->hasVFP2()) 1457 return false; 1458 1459 // Check to see if the 2nd operand is a constant that we can encode directly 1460 // in the compare. 1461 int Imm = 0; 1462 bool UseImm = false; 1463 bool isNegativeImm = false; 1464 // FIXME: At -O0 we don't have anything that canonicalizes operand order. 1465 // Thus, Src1Value may be a ConstantInt, but we're missing it. 1466 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) { 1467 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 || 1468 SrcVT == MVT::i1) { 1469 const APInt &CIVal = ConstInt->getValue(); 1470 Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue(); 1471 // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather 1472 // then a cmn, because there is no way to represent 2147483648 as a 1473 // signed 32-bit int. 1474 if (Imm < 0 && Imm != (int)0x80000000) { 1475 isNegativeImm = true; 1476 Imm = -Imm; 1477 } 1478 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1479 (ARM_AM::getSOImmVal(Imm) != -1); 1480 } 1481 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) { 1482 if (SrcVT == MVT::f32 || SrcVT == MVT::f64) 1483 if (ConstFP->isZero() && !ConstFP->isNegative()) 1484 UseImm = true; 1485 } 1486 1487 unsigned CmpOpc; 1488 bool isICmp = true; 1489 bool needsExt = false; 1490 switch (SrcVT.SimpleTy) { 1491 default: return false; 1492 // TODO: Verify compares. 1493 case MVT::f32: 1494 isICmp = false; 1495 CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES; 1496 break; 1497 case MVT::f64: 1498 isICmp = false; 1499 CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED; 1500 break; 1501 case MVT::i1: 1502 case MVT::i8: 1503 case MVT::i16: 1504 needsExt = true; 1505 // Intentional fall-through. 1506 case MVT::i32: 1507 if (isThumb2) { 1508 if (!UseImm) 1509 CmpOpc = ARM::t2CMPrr; 1510 else 1511 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri; 1512 } else { 1513 if (!UseImm) 1514 CmpOpc = ARM::CMPrr; 1515 else 1516 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri; 1517 } 1518 break; 1519 } 1520 1521 unsigned SrcReg1 = getRegForValue(Src1Value); 1522 if (SrcReg1 == 0) return false; 1523 1524 unsigned SrcReg2 = 0; 1525 if (!UseImm) { 1526 SrcReg2 = getRegForValue(Src2Value); 1527 if (SrcReg2 == 0) return false; 1528 } 1529 1530 // We have i1, i8, or i16, we need to either zero extend or sign extend. 
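  // For example, an unsigned i8 compare against a small constant becomes
  // roughly a zero-extend (e.g. UXTB) of the operand followed by a CMP with
  // the immediate; the exact opcodes depend on the subtarget.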
1531 if (needsExt) { 1532 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt); 1533 if (SrcReg1 == 0) return false; 1534 if (!UseImm) { 1535 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt); 1536 if (SrcReg2 == 0) return false; 1537 } 1538 } 1539 1540 const MCInstrDesc &II = TII.get(CmpOpc); 1541 SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0); 1542 if (!UseImm) { 1543 SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1); 1544 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 1545 .addReg(SrcReg1).addReg(SrcReg2)); 1546 } else { 1547 MachineInstrBuilder MIB; 1548 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 1549 .addReg(SrcReg1); 1550 1551 // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0. 1552 if (isICmp) 1553 MIB.addImm(Imm); 1554 AddOptionalDefs(MIB); 1555 } 1556 1557 // For floating point we need to move the result to a comparison register 1558 // that we can then use for branches. 1559 if (Ty->isFloatTy() || Ty->isDoubleTy()) 1560 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1561 TII.get(ARM::FMSTAT))); 1562 return true; 1563 } 1564 1565 bool ARMFastISel::SelectCmp(const Instruction *I) { 1566 const CmpInst *CI = cast<CmpInst>(I); 1567 1568 // Get the compare predicate. 1569 ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate()); 1570 1571 // We may not handle every CC for now. 1572 if (ARMPred == ARMCC::AL) return false; 1573 1574 // Emit the compare. 1575 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1576 return false; 1577 1578 // Now set a register based on the comparison. Explicitly set the predicates 1579 // here. 1580 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1581 const TargetRegisterClass *RC = isThumb2 ? 1582 (const TargetRegisterClass*)&ARM::rGPRRegClass : 1583 (const TargetRegisterClass*)&ARM::GPRRegClass; 1584 unsigned DestReg = createResultReg(RC); 1585 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); 1586 unsigned ZeroReg = TargetMaterializeConstant(Zero); 1587 // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR. 1588 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg) 1589 .addReg(ZeroReg).addImm(1) 1590 .addImm(ARMPred).addReg(ARM::CPSR); 1591 1592 UpdateValueMap(I, DestReg); 1593 return true; 1594 } 1595 1596 bool ARMFastISel::SelectFPExt(const Instruction *I) { 1597 // Make sure we have VFP and that we're extending float to double. 1598 if (!Subtarget->hasVFP2()) return false; 1599 1600 Value *V = I->getOperand(0); 1601 if (!I->getType()->isDoubleTy() || 1602 !V->getType()->isFloatTy()) return false; 1603 1604 unsigned Op = getRegForValue(V); 1605 if (Op == 0) return false; 1606 1607 unsigned Result = createResultReg(&ARM::DPRRegClass); 1608 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1609 TII.get(ARM::VCVTDS), Result) 1610 .addReg(Op)); 1611 UpdateValueMap(I, Result); 1612 return true; 1613 } 1614 1615 bool ARMFastISel::SelectFPTrunc(const Instruction *I) { 1616 // Make sure we have VFP and that we're truncating double to float. 
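  // The actual conversion is a single VCVT (f64 -> f32) in the VFP register
  // file; see ARM::VCVTSD below.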
1617 if (!Subtarget->hasVFP2()) return false; 1618 1619 Value *V = I->getOperand(0); 1620 if (!(I->getType()->isFloatTy() && 1621 V->getType()->isDoubleTy())) return false; 1622 1623 unsigned Op = getRegForValue(V); 1624 if (Op == 0) return false; 1625 1626 unsigned Result = createResultReg(&ARM::SPRRegClass); 1627 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1628 TII.get(ARM::VCVTSD), Result) 1629 .addReg(Op)); 1630 UpdateValueMap(I, Result); 1631 return true; 1632 } 1633 1634 bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { 1635 // Make sure we have VFP. 1636 if (!Subtarget->hasVFP2()) return false; 1637 1638 MVT DstVT; 1639 Type *Ty = I->getType(); 1640 if (!isTypeLegal(Ty, DstVT)) 1641 return false; 1642 1643 Value *Src = I->getOperand(0); 1644 EVT SrcEVT = TLI.getValueType(Src->getType(), true); 1645 if (!SrcEVT.isSimple()) 1646 return false; 1647 MVT SrcVT = SrcEVT.getSimpleVT(); 1648 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 1649 return false; 1650 1651 unsigned SrcReg = getRegForValue(Src); 1652 if (SrcReg == 0) return false; 1653 1654 // Handle sign-extension. 1655 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) { 1656 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32, 1657 /*isZExt*/!isSigned); 1658 if (SrcReg == 0) return false; 1659 } 1660 1661 // The conversion routine works on fp-reg to fp-reg and the operand above 1662 // was an integer, move it to the fp registers if possible. 1663 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg); 1664 if (FP == 0) return false; 1665 1666 unsigned Opc; 1667 if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS; 1668 else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD; 1669 else return false; 1670 1671 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); 1672 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1673 ResultReg) 1674 .addReg(FP)); 1675 UpdateValueMap(I, ResultReg); 1676 return true; 1677 } 1678 1679 bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { 1680 // Make sure we have VFP. 1681 if (!Subtarget->hasVFP2()) return false; 1682 1683 MVT DstVT; 1684 Type *RetTy = I->getType(); 1685 if (!isTypeLegal(RetTy, DstVT)) 1686 return false; 1687 1688 unsigned Op = getRegForValue(I->getOperand(0)); 1689 if (Op == 0) return false; 1690 1691 unsigned Opc; 1692 Type *OpTy = I->getOperand(0)->getType(); 1693 if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS; 1694 else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD; 1695 else return false; 1696 1697 // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg. 1698 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1699 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1700 ResultReg) 1701 .addReg(Op)); 1702 1703 // This result needs to be in an integer register, but the conversion only 1704 // takes place in fp-regs. 1705 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg); 1706 if (IntReg == 0) return false; 1707 1708 UpdateValueMap(I, IntReg); 1709 return true; 1710 } 1711 1712 bool ARMFastISel::SelectSelect(const Instruction *I) { 1713 MVT VT; 1714 if (!isTypeLegal(I->getType(), VT)) 1715 return false; 1716 1717 // Things need to be register sized for register moves. 
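  // The predicated MOVCC/MVNCC instructions used below operate on full
  // 32-bit GPRs, so only i32 selects are handled here.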
1718 if (VT != MVT::i32) return false; 1719 1720 unsigned CondReg = getRegForValue(I->getOperand(0)); 1721 if (CondReg == 0) return false; 1722 unsigned Op1Reg = getRegForValue(I->getOperand(1)); 1723 if (Op1Reg == 0) return false; 1724 1725 // Check to see if we can use an immediate in the conditional move. 1726 int Imm = 0; 1727 bool UseImm = false; 1728 bool isNegativeImm = false; 1729 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) { 1730 assert (VT == MVT::i32 && "Expecting an i32."); 1731 Imm = (int)ConstInt->getValue().getZExtValue(); 1732 if (Imm < 0) { 1733 isNegativeImm = true; 1734 Imm = ~Imm; 1735 } 1736 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1737 (ARM_AM::getSOImmVal(Imm) != -1); 1738 } 1739 1740 unsigned Op2Reg = 0; 1741 if (!UseImm) { 1742 Op2Reg = getRegForValue(I->getOperand(2)); 1743 if (Op2Reg == 0) return false; 1744 } 1745 1746 unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri; 1747 CondReg = constrainOperandRegClass(TII.get(CmpOpc), CondReg, 0); 1748 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) 1749 .addReg(CondReg).addImm(0)); 1750 1751 unsigned MovCCOpc; 1752 const TargetRegisterClass *RC; 1753 if (!UseImm) { 1754 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass; 1755 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr; 1756 } else { 1757 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass; 1758 if (!isNegativeImm) 1759 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1760 else 1761 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi; 1762 } 1763 unsigned ResultReg = createResultReg(RC); 1764 if (!UseImm) { 1765 Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1); 1766 Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2); 1767 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1768 .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR); 1769 } else { 1770 Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1); 1771 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1772 .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR); 1773 } 1774 UpdateValueMap(I, ResultReg); 1775 return true; 1776 } 1777 1778 bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) { 1779 MVT VT; 1780 Type *Ty = I->getType(); 1781 if (!isTypeLegal(Ty, VT)) 1782 return false; 1783 1784 // If we have integer div support we should have selected this automagically. 1785 // In case we have a real miss go ahead and return false and we'll pick 1786 // it up later. 1787 if (Subtarget->hasDivide()) return false; 1788 1789 // Otherwise emit a libcall. 1790 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1791 if (VT == MVT::i8) 1792 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8; 1793 else if (VT == MVT::i16) 1794 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16; 1795 else if (VT == MVT::i32) 1796 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32; 1797 else if (VT == MVT::i64) 1798 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64; 1799 else if (VT == MVT::i128) 1800 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128; 1801 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!"); 1802 1803 return ARMEmitLibcall(I, LC); 1804 } 1805 1806 bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) { 1807 MVT VT; 1808 Type *Ty = I->getType(); 1809 if (!isTypeLegal(Ty, VT)) 1810 return false; 1811 1812 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1813 if (VT == MVT::i8) 1814 LC = isSigned ? 
RTLIB::SREM_I8 : RTLIB::UREM_I8; 1815 else if (VT == MVT::i16) 1816 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16; 1817 else if (VT == MVT::i32) 1818 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32; 1819 else if (VT == MVT::i64) 1820 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64; 1821 else if (VT == MVT::i128) 1822 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128; 1823 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!"); 1824 1825 return ARMEmitLibcall(I, LC); 1826 } 1827 1828 bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) { 1829 EVT DestVT = TLI.getValueType(I->getType(), true); 1830 1831 // We can get here in the case when we have a binary operation on a non-legal 1832 // type and the target independent selector doesn't know how to handle it. 1833 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 1834 return false; 1835 1836 unsigned Opc; 1837 switch (ISDOpcode) { 1838 default: return false; 1839 case ISD::ADD: 1840 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr; 1841 break; 1842 case ISD::OR: 1843 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr; 1844 break; 1845 case ISD::SUB: 1846 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr; 1847 break; 1848 } 1849 1850 unsigned SrcReg1 = getRegForValue(I->getOperand(0)); 1851 if (SrcReg1 == 0) return false; 1852 1853 // TODO: Often the 2nd operand is an immediate, which can be encoded directly 1854 // in the instruction, rather then materializing the value in a register. 1855 unsigned SrcReg2 = getRegForValue(I->getOperand(1)); 1856 if (SrcReg2 == 0) return false; 1857 1858 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); 1859 SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1); 1860 SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2); 1861 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1862 TII.get(Opc), ResultReg) 1863 .addReg(SrcReg1).addReg(SrcReg2)); 1864 UpdateValueMap(I, ResultReg); 1865 return true; 1866 } 1867 1868 bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) { 1869 EVT FPVT = TLI.getValueType(I->getType(), true); 1870 if (!FPVT.isSimple()) return false; 1871 MVT VT = FPVT.getSimpleVT(); 1872 1873 // We can get here in the case when we want to use NEON for our fp 1874 // operations, but can't figure out how to. Just use the vfp instructions 1875 // if we have them. 1876 // FIXME: It'd be nice to use NEON instructions. 1877 Type *Ty = I->getType(); 1878 bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy()); 1879 if (isFloat && !Subtarget->hasVFP2()) 1880 return false; 1881 1882 unsigned Opc; 1883 bool is64bit = VT == MVT::f64 || VT == MVT::i64; 1884 switch (ISDOpcode) { 1885 default: return false; 1886 case ISD::FADD: 1887 Opc = is64bit ? ARM::VADDD : ARM::VADDS; 1888 break; 1889 case ISD::FSUB: 1890 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS; 1891 break; 1892 case ISD::FMUL: 1893 Opc = is64bit ? 
ARM::VMULD : ARM::VMULS; 1894 break; 1895 } 1896 unsigned Op1 = getRegForValue(I->getOperand(0)); 1897 if (Op1 == 0) return false; 1898 1899 unsigned Op2 = getRegForValue(I->getOperand(1)); 1900 if (Op2 == 0) return false; 1901 1902 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy)); 1903 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1904 TII.get(Opc), ResultReg) 1905 .addReg(Op1).addReg(Op2)); 1906 UpdateValueMap(I, ResultReg); 1907 return true; 1908 } 1909 1910 // Call Handling Code 1911 1912 // This is largely taken directly from CCAssignFnForNode 1913 // TODO: We may not support all of this. 1914 CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, 1915 bool Return, 1916 bool isVarArg) { 1917 switch (CC) { 1918 default: 1919 llvm_unreachable("Unsupported calling convention"); 1920 case CallingConv::Fast: 1921 if (Subtarget->hasVFP2() && !isVarArg) { 1922 if (!Subtarget->isAAPCS_ABI()) 1923 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1924 // For AAPCS ABI targets, just use VFP variant of the calling convention. 1925 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1926 } 1927 // Fallthrough 1928 case CallingConv::C: 1929 // Use target triple & subtarget features to do actual dispatch. 1930 if (Subtarget->isAAPCS_ABI()) { 1931 if (Subtarget->hasVFP2() && 1932 TM.Options.FloatABIType == FloatABI::Hard && !isVarArg) 1933 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1934 else 1935 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1936 } else 1937 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1938 case CallingConv::ARM_AAPCS_VFP: 1939 if (!isVarArg) 1940 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1941 // Fall through to soft float variant, variadic functions don't 1942 // use hard floating point ABI. 1943 case CallingConv::ARM_AAPCS: 1944 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1945 case CallingConv::ARM_APCS: 1946 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1947 case CallingConv::GHC: 1948 if (Return) 1949 llvm_unreachable("Can't return in GHC call convention"); 1950 else 1951 return CC_ARM_APCS_GHC; 1952 } 1953 } 1954 1955 bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, 1956 SmallVectorImpl<unsigned> &ArgRegs, 1957 SmallVectorImpl<MVT> &ArgVTs, 1958 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 1959 SmallVectorImpl<unsigned> &RegArgs, 1960 CallingConv::ID CC, 1961 unsigned &NumBytes, 1962 bool isVarArg) { 1963 SmallVector<CCValAssign, 16> ArgLocs; 1964 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context); 1965 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, 1966 CCAssignFnForCall(CC, false, isVarArg)); 1967 1968 // Check that we can handle all of the arguments. If we can't, then bail out 1969 // now before we add code to the MBB. 1970 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1971 CCValAssign &VA = ArgLocs[i]; 1972 MVT ArgVT = ArgVTs[VA.getValNo()]; 1973 1974 // We don't handle NEON/vector parameters yet. 1975 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64) 1976 return false; 1977 1978 // Now copy/store arg to correct locations. 1979 if (VA.isRegLoc() && !VA.needsCustom()) { 1980 continue; 1981 } else if (VA.needsCustom()) { 1982 // TODO: We need custom lowering for vector (v2f64) args. 1983 if (VA.getLocVT() != MVT::f64 || 1984 // TODO: Only handle register args for now. 
1985 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc()) 1986 return false; 1987 } else { 1988 switch (ArgVT.SimpleTy) { 1989 default: 1990 return false; 1991 case MVT::i1: 1992 case MVT::i8: 1993 case MVT::i16: 1994 case MVT::i32: 1995 break; 1996 case MVT::f32: 1997 if (!Subtarget->hasVFP2()) 1998 return false; 1999 break; 2000 case MVT::f64: 2001 if (!Subtarget->hasVFP2()) 2002 return false; 2003 break; 2004 } 2005 } 2006 } 2007 2008 // At the point, we are able to handle the call's arguments in fast isel. 2009 2010 // Get a count of how many bytes are to be pushed on the stack. 2011 NumBytes = CCInfo.getNextStackOffset(); 2012 2013 // Issue CALLSEQ_START 2014 unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); 2015 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2016 TII.get(AdjStackDown)) 2017 .addImm(NumBytes)); 2018 2019 // Process the args. 2020 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2021 CCValAssign &VA = ArgLocs[i]; 2022 unsigned Arg = ArgRegs[VA.getValNo()]; 2023 MVT ArgVT = ArgVTs[VA.getValNo()]; 2024 2025 assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) && 2026 "We don't handle NEON/vector parameters yet."); 2027 2028 // Handle arg promotion, etc. 2029 switch (VA.getLocInfo()) { 2030 case CCValAssign::Full: break; 2031 case CCValAssign::SExt: { 2032 MVT DestVT = VA.getLocVT(); 2033 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false); 2034 assert (Arg != 0 && "Failed to emit a sext"); 2035 ArgVT = DestVT; 2036 break; 2037 } 2038 case CCValAssign::AExt: 2039 // Intentional fall-through. Handle AExt and ZExt. 2040 case CCValAssign::ZExt: { 2041 MVT DestVT = VA.getLocVT(); 2042 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true); 2043 assert (Arg != 0 && "Failed to emit a zext"); 2044 ArgVT = DestVT; 2045 break; 2046 } 2047 case CCValAssign::BCvt: { 2048 unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg, 2049 /*TODO: Kill=*/false); 2050 assert(BC != 0 && "Failed to emit a bitcast!"); 2051 Arg = BC; 2052 ArgVT = VA.getLocVT(); 2053 break; 2054 } 2055 default: llvm_unreachable("Unknown arg promotion!"); 2056 } 2057 2058 // Now copy/store arg to correct locations. 2059 if (VA.isRegLoc() && !VA.needsCustom()) { 2060 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2061 VA.getLocReg()) 2062 .addReg(Arg); 2063 RegArgs.push_back(VA.getLocReg()); 2064 } else if (VA.needsCustom()) { 2065 // TODO: We need custom lowering for vector (v2f64) args. 2066 assert(VA.getLocVT() == MVT::f64 && 2067 "Custom lowering for v2f64 args not available"); 2068 2069 CCValAssign &NextVA = ArgLocs[++i]; 2070 2071 assert(VA.isRegLoc() && NextVA.isRegLoc() && 2072 "We only handle register args!"); 2073 2074 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2075 TII.get(ARM::VMOVRRD), VA.getLocReg()) 2076 .addReg(NextVA.getLocReg(), RegState::Define) 2077 .addReg(Arg)); 2078 RegArgs.push_back(VA.getLocReg()); 2079 RegArgs.push_back(NextVA.getLocReg()); 2080 } else { 2081 assert(VA.isMemLoc()); 2082 // Need to store on the stack. 
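// Sketch of what this emits for an i32 argument assigned to stack offset N:
//   str rArg, [sp, #N]
// (the actual opcode and addressing mode come from ARMEmitStore below).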
2083 Address Addr; 2084 Addr.BaseType = Address::RegBase; 2085 Addr.Base.Reg = ARM::SP; 2086 Addr.Offset = VA.getLocMemOffset(); 2087 2088 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet; 2089 assert(EmitRet && "Could not emit a store for argument!"); 2090 } 2091 } 2092 2093 return true; 2094 } 2095 2096 bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 2097 const Instruction *I, CallingConv::ID CC, 2098 unsigned &NumBytes, bool isVarArg) { 2099 // Issue CALLSEQ_END 2100 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 2101 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2102 TII.get(AdjStackUp)) 2103 .addImm(NumBytes).addImm(0)); 2104 2105 // Now the return value. 2106 if (RetVT != MVT::isVoid) { 2107 SmallVector<CCValAssign, 16> RVLocs; 2108 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); 2109 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2110 2111 // Copy all of the result registers out of their specified physreg. 2112 if (RVLocs.size() == 2 && RetVT == MVT::f64) { 2113 // For this move we copy into two registers and then move into the 2114 // double fp reg we want. 2115 MVT DestVT = RVLocs[0].getValVT(); 2116 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); 2117 unsigned ResultReg = createResultReg(DstRC); 2118 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2119 TII.get(ARM::VMOVDRR), ResultReg) 2120 .addReg(RVLocs[0].getLocReg()) 2121 .addReg(RVLocs[1].getLocReg())); 2122 2123 UsedRegs.push_back(RVLocs[0].getLocReg()); 2124 UsedRegs.push_back(RVLocs[1].getLocReg()); 2125 2126 // Finally update the result. 2127 UpdateValueMap(I, ResultReg); 2128 } else { 2129 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!"); 2130 MVT CopyVT = RVLocs[0].getValVT(); 2131 2132 // Special handling for extended integers. 2133 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16) 2134 CopyVT = MVT::i32; 2135 2136 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); 2137 2138 unsigned ResultReg = createResultReg(DstRC); 2139 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2140 ResultReg).addReg(RVLocs[0].getLocReg()); 2141 UsedRegs.push_back(RVLocs[0].getLocReg()); 2142 2143 // Finally update the result. 2144 UpdateValueMap(I, ResultReg); 2145 } 2146 } 2147 2148 return true; 2149 } 2150 2151 bool ARMFastISel::SelectRet(const Instruction *I) { 2152 const ReturnInst *Ret = cast<ReturnInst>(I); 2153 const Function &F = *I->getParent()->getParent(); 2154 2155 if (!FuncInfo.CanLowerReturn) 2156 return false; 2157 2158 // Build a list of return value registers. 2159 SmallVector<unsigned, 4> RetRegs; 2160 2161 CallingConv::ID CC = F.getCallingConv(); 2162 if (Ret->getNumOperands() > 0) { 2163 SmallVector<ISD::OutputArg, 4> Outs; 2164 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI); 2165 2166 // Analyze operands of the call, assigning locations to each operand. 2167 SmallVector<CCValAssign, 16> ValLocs; 2168 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext()); 2169 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */, 2170 F.isVarArg())); 2171 2172 const Value *RV = Ret->getOperand(0); 2173 unsigned Reg = getRegForValue(RV); 2174 if (Reg == 0) 2175 return false; 2176 2177 // Only handle a single return value for now. 2178 if (ValLocs.size() != 1) 2179 return false; 2180 2181 CCValAssign &VA = ValLocs[0]; 2182 2183 // Don't bother handling odd stuff for now. 
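// "Odd stuff" means a location whose LocInfo is not Full (SExt/ZExt/BCvt
// promotions) or that is not assigned to a register; both cases fall back to
// SelectionDAG. Small integer return values are still widened below.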
2184 if (VA.getLocInfo() != CCValAssign::Full) 2185 return false; 2186 // Only handle register returns for now. 2187 if (!VA.isRegLoc()) 2188 return false; 2189 2190 unsigned SrcReg = Reg + VA.getValNo(); 2191 EVT RVEVT = TLI.getValueType(RV->getType()); 2192 if (!RVEVT.isSimple()) return false; 2193 MVT RVVT = RVEVT.getSimpleVT(); 2194 MVT DestVT = VA.getValVT(); 2195 // Special handling for extended integers. 2196 if (RVVT != DestVT) { 2197 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) 2198 return false; 2199 2200 assert(DestVT == MVT::i32 && "ARM should always ext to i32"); 2201 2202 // Perform extension if flagged as either zext or sext. Otherwise, do 2203 // nothing. 2204 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) { 2205 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt()); 2206 if (SrcReg == 0) return false; 2207 } 2208 } 2209 2210 // Make the copy. 2211 unsigned DstReg = VA.getLocReg(); 2212 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); 2213 // Avoid a cross-class copy. This is very unlikely. 2214 if (!SrcRC->contains(DstReg)) 2215 return false; 2216 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2217 DstReg).addReg(SrcReg); 2218 2219 // Add register to return instruction. 2220 RetRegs.push_back(VA.getLocReg()); 2221 } 2222 2223 unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET; 2224 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2225 TII.get(RetOpc)); 2226 AddOptionalDefs(MIB); 2227 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) 2228 MIB.addReg(RetRegs[i], RegState::Implicit); 2229 return true; 2230 } 2231 2232 unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) { 2233 if (UseReg) 2234 return isThumb2 ? ARM::tBLXr : ARM::BLX; 2235 else 2236 return isThumb2 ? ARM::tBL : ARM::BL; 2237 } 2238 2239 unsigned ARMFastISel::getLibcallReg(const Twine &Name) { 2240 // Manually compute the global's type to avoid building it when unnecessary. 2241 Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0); 2242 EVT LCREVT = TLI.getValueType(GVTy); 2243 if (!LCREVT.isSimple()) return 0; 2244 2245 GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false, 2246 GlobalValue::ExternalLinkage, 0, Name); 2247 assert(GV->getType() == GVTy && "We miscomputed the type for the global!"); 2248 return ARMMaterializeGV(GV, LCREVT.getSimpleVT()); 2249 } 2250 2251 // A quick function that will emit a call for a named libcall in F with the 2252 // vector of passed arguments for the Instruction in I. We can assume that we 2253 // can emit a call for any libcall we can produce. This is an abridged version 2254 // of the full call infrastructure since we won't need to worry about things 2255 // like computed function pointers or strange arguments at call sites. 2256 // TODO: Try to unify this and the normal call bits for ARM, then try to unify 2257 // with X86. 2258 bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { 2259 CallingConv::ID CC = TLI.getLibcallCallingConv(Call); 2260 2261 // Handle *simple* calls for now. 2262 Type *RetTy = I->getType(); 2263 MVT RetVT; 2264 if (RetTy->isVoidTy()) 2265 RetVT = MVT::isVoid; 2266 else if (!isTypeLegal(RetTy, RetVT)) 2267 return false; 2268 2269 // Can't handle non-double multi-reg retvals. 
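// The only multi-register result this path tolerates is an f64 returned in a
// GPR pair, which FinishCall recombines with VMOVDRR; any other multi-register
// result bails out below.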
2270 if (RetVT != MVT::isVoid && RetVT != MVT::i32) { 2271 SmallVector<CCValAssign, 16> RVLocs; 2272 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context); 2273 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false)); 2274 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2275 return false; 2276 } 2277 2278 // Set up the argument vectors. 2279 SmallVector<Value*, 8> Args; 2280 SmallVector<unsigned, 8> ArgRegs; 2281 SmallVector<MVT, 8> ArgVTs; 2282 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2283 Args.reserve(I->getNumOperands()); 2284 ArgRegs.reserve(I->getNumOperands()); 2285 ArgVTs.reserve(I->getNumOperands()); 2286 ArgFlags.reserve(I->getNumOperands()); 2287 for (unsigned i = 0; i < I->getNumOperands(); ++i) { 2288 Value *Op = I->getOperand(i); 2289 unsigned Arg = getRegForValue(Op); 2290 if (Arg == 0) return false; 2291 2292 Type *ArgTy = Op->getType(); 2293 MVT ArgVT; 2294 if (!isTypeLegal(ArgTy, ArgVT)) return false; 2295 2296 ISD::ArgFlagsTy Flags; 2297 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); 2298 Flags.setOrigAlign(OriginalAlignment); 2299 2300 Args.push_back(Op); 2301 ArgRegs.push_back(Arg); 2302 ArgVTs.push_back(ArgVT); 2303 ArgFlags.push_back(Flags); 2304 } 2305 2306 // Handle the arguments now that we've gotten them. 2307 SmallVector<unsigned, 4> RegArgs; 2308 unsigned NumBytes; 2309 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2310 RegArgs, CC, NumBytes, false)) 2311 return false; 2312 2313 unsigned CalleeReg = 0; 2314 if (EnableARMLongCalls) { 2315 CalleeReg = getLibcallReg(TLI.getLibcallName(Call)); 2316 if (CalleeReg == 0) return false; 2317 } 2318 2319 // Issue the call. 2320 unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls); 2321 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2322 DL, TII.get(CallOpc)); 2323 // BL / BLX don't take a predicate, but tBL / tBLX do. 2324 if (isThumb2) 2325 AddDefaultPred(MIB); 2326 if (EnableARMLongCalls) 2327 MIB.addReg(CalleeReg); 2328 else 2329 MIB.addExternalSymbol(TLI.getLibcallName(Call)); 2330 2331 // Add implicit physical register uses to the call. 2332 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2333 MIB.addReg(RegArgs[i], RegState::Implicit); 2334 2335 // Add a register mask with the call-preserved registers. 2336 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2337 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2338 2339 // Finish off the call including any return values. 2340 SmallVector<unsigned, 4> UsedRegs; 2341 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false; 2342 2343 // Set all unused physreg defs as dead. 2344 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2345 2346 return true; 2347 } 2348 2349 bool ARMFastISel::SelectCall(const Instruction *I, 2350 const char *IntrMemName = 0) { 2351 const CallInst *CI = cast<CallInst>(I); 2352 const Value *Callee = CI->getCalledValue(); 2353 2354 // Can't handle inline asm. 2355 if (isa<InlineAsm>(Callee)) return false; 2356 2357 // Allow SelectionDAG isel to handle tail calls. 2358 if (CI->isTailCall()) return false; 2359 2360 // Check the calling convention. 2361 ImmutableCallSite CS(CI); 2362 CallingConv::ID CC = CS.getCallingConv(); 2363 2364 // TODO: Avoid some calling conventions? 2365 2366 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); 2367 FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 2368 bool isVarArg = FTy->isVarArg(); 2369 2370 // Handle *simple* calls for now. 
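// "Simple" roughly means: a void, i1/i8/i16/i32 or f32/f64 result, no inline
// asm or tail calls (rejected above), and no sret/byval/nest/inreg arguments
// (rejected below); anything else is left to SelectionDAG.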
2371 Type *RetTy = I->getType(); 2372 MVT RetVT; 2373 if (RetTy->isVoidTy()) 2374 RetVT = MVT::isVoid; 2375 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 && 2376 RetVT != MVT::i8 && RetVT != MVT::i1) 2377 return false; 2378 2379 // Can't handle non-double multi-reg retvals. 2380 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 && 2381 RetVT != MVT::i16 && RetVT != MVT::i32) { 2382 SmallVector<CCValAssign, 16> RVLocs; 2383 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); 2384 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2385 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2386 return false; 2387 } 2388 2389 // Set up the argument vectors. 2390 SmallVector<Value*, 8> Args; 2391 SmallVector<unsigned, 8> ArgRegs; 2392 SmallVector<MVT, 8> ArgVTs; 2393 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2394 unsigned arg_size = CS.arg_size(); 2395 Args.reserve(arg_size); 2396 ArgRegs.reserve(arg_size); 2397 ArgVTs.reserve(arg_size); 2398 ArgFlags.reserve(arg_size); 2399 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 2400 i != e; ++i) { 2401 // If we're lowering a memory intrinsic instead of a regular call, skip the 2402 // last two arguments, which shouldn't be passed to the underlying function. 2403 if (IntrMemName && e-i <= 2) 2404 break; 2405 2406 ISD::ArgFlagsTy Flags; 2407 unsigned AttrInd = i - CS.arg_begin() + 1; 2408 if (CS.paramHasAttr(AttrInd, Attribute::SExt)) 2409 Flags.setSExt(); 2410 if (CS.paramHasAttr(AttrInd, Attribute::ZExt)) 2411 Flags.setZExt(); 2412 2413 // FIXME: Only handle *easy* calls for now. 2414 if (CS.paramHasAttr(AttrInd, Attribute::InReg) || 2415 CS.paramHasAttr(AttrInd, Attribute::StructRet) || 2416 CS.paramHasAttr(AttrInd, Attribute::Nest) || 2417 CS.paramHasAttr(AttrInd, Attribute::ByVal)) 2418 return false; 2419 2420 Type *ArgTy = (*i)->getType(); 2421 MVT ArgVT; 2422 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 && 2423 ArgVT != MVT::i1) 2424 return false; 2425 2426 unsigned Arg = getRegForValue(*i); 2427 if (Arg == 0) 2428 return false; 2429 2430 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); 2431 Flags.setOrigAlign(OriginalAlignment); 2432 2433 Args.push_back(*i); 2434 ArgRegs.push_back(Arg); 2435 ArgVTs.push_back(ArgVT); 2436 ArgFlags.push_back(Flags); 2437 } 2438 2439 // Handle the arguments now that we've gotten them. 2440 SmallVector<unsigned, 4> RegArgs; 2441 unsigned NumBytes; 2442 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2443 RegArgs, CC, NumBytes, isVarArg)) 2444 return false; 2445 2446 bool UseReg = false; 2447 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee); 2448 if (!GV || EnableARMLongCalls) UseReg = true; 2449 2450 unsigned CalleeReg = 0; 2451 if (UseReg) { 2452 if (IntrMemName) 2453 CalleeReg = getLibcallReg(IntrMemName); 2454 else 2455 CalleeReg = getRegForValue(Callee); 2456 2457 if (CalleeReg == 0) return false; 2458 } 2459 2460 // Issue the call. 2461 unsigned CallOpc = ARMSelectCallOp(UseReg); 2462 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2463 DL, TII.get(CallOpc)); 2464 2465 unsigned char OpFlags = 0; 2466 2467 // Add MO_PLT for global address or external symbol in the PIC relocation 2468 // model. 2469 if (Subtarget->isTargetELF() && TM.getRelocationModel() == Reloc::PIC_) 2470 OpFlags = ARMII::MO_PLT; 2471 2472 // ARM calls don't take a predicate, but tBL / tBLX do. 
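// (AddDefaultPred below just appends an AL predicate and a null predicate
// register to the Thumb call, keeping the machine instruction well-formed.)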
2473 if(isThumb2) 2474 AddDefaultPred(MIB); 2475 if (UseReg) 2476 MIB.addReg(CalleeReg); 2477 else if (!IntrMemName) 2478 MIB.addGlobalAddress(GV, 0, OpFlags); 2479 else 2480 MIB.addExternalSymbol(IntrMemName, OpFlags); 2481 2482 // Add implicit physical register uses to the call. 2483 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2484 MIB.addReg(RegArgs[i], RegState::Implicit); 2485 2486 // Add a register mask with the call-preserved registers. 2487 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2488 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2489 2490 // Finish off the call including any return values. 2491 SmallVector<unsigned, 4> UsedRegs; 2492 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg)) 2493 return false; 2494 2495 // Set all unused physreg defs as dead. 2496 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2497 2498 return true; 2499 } 2500 2501 bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) { 2502 return Len <= 16; 2503 } 2504 2505 bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, 2506 uint64_t Len, unsigned Alignment) { 2507 // Make sure we don't bloat code by inlining very large memcpy's. 2508 if (!ARMIsMemCpySmall(Len)) 2509 return false; 2510 2511 while (Len) { 2512 MVT VT; 2513 if (!Alignment || Alignment >= 4) { 2514 if (Len >= 4) 2515 VT = MVT::i32; 2516 else if (Len >= 2) 2517 VT = MVT::i16; 2518 else { 2519 assert (Len == 1 && "Expected a length of 1!"); 2520 VT = MVT::i8; 2521 } 2522 } else { 2523 // Bound based on alignment. 2524 if (Len >= 2 && Alignment == 2) 2525 VT = MVT::i16; 2526 else { 2527 VT = MVT::i8; 2528 } 2529 } 2530 2531 bool RV; 2532 unsigned ResultReg; 2533 RV = ARMEmitLoad(VT, ResultReg, Src); 2534 assert (RV == true && "Should be able to handle this load."); 2535 RV = ARMEmitStore(VT, ResultReg, Dest); 2536 assert (RV == true && "Should be able to handle this store."); 2537 (void)RV; 2538 2539 unsigned Size = VT.getSizeInBits()/8; 2540 Len -= Size; 2541 Dest.Offset += Size; 2542 Src.Offset += Size; 2543 } 2544 2545 return true; 2546 } 2547 2548 bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { 2549 // FIXME: Handle more intrinsics. 2550 switch (I.getIntrinsicID()) { 2551 default: return false; 2552 case Intrinsic::frameaddress: { 2553 MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo(); 2554 MFI->setFrameAddressIsTaken(true); 2555 2556 unsigned LdrOpc; 2557 const TargetRegisterClass *RC; 2558 if (isThumb2) { 2559 LdrOpc = ARM::t2LDRi12; 2560 RC = (const TargetRegisterClass*)&ARM::tGPRRegClass; 2561 } else { 2562 LdrOpc = ARM::LDRi12; 2563 RC = (const TargetRegisterClass*)&ARM::GPRRegClass; 2564 } 2565 2566 const ARMBaseRegisterInfo *RegInfo = 2567 static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo()); 2568 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF)); 2569 unsigned SrcReg = FramePtr; 2570 2571 // Recursively load frame address 2572 // ldr r0 [fp] 2573 // ldr r0 [r0] 2574 // ldr r0 [r0] 2575 // ... 2576 unsigned DestReg; 2577 unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue(); 2578 while (Depth--) { 2579 DestReg = createResultReg(RC); 2580 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2581 TII.get(LdrOpc), DestReg) 2582 .addReg(SrcReg).addImm(0)); 2583 SrcReg = DestReg; 2584 } 2585 UpdateValueMap(&I, SrcReg); 2586 return true; 2587 } 2588 case Intrinsic::memcpy: 2589 case Intrinsic::memmove: { 2590 const MemTransferInst &MTI = cast<MemTransferInst>(I); 2591 // Don't handle volatile. 
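// Volatile transfers are handled conservatively: bail out and let the generic
// lowering deal with them instead of inlining loads/stores or emitting a
// plain libcall here.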
2592 if (MTI.isVolatile()) 2593 return false; 2594 2595 // Disable inlining for memmove before calls to ComputeAddress. Otherwise, 2596 // we would emit dead code because we don't currently handle memmoves. 2597 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy); 2598 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) { 2599 // Small memcpy's are common enough that we want to do them without a call 2600 // if possible. 2601 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue(); 2602 if (ARMIsMemCpySmall(Len)) { 2603 Address Dest, Src; 2604 if (!ARMComputeAddress(MTI.getRawDest(), Dest) || 2605 !ARMComputeAddress(MTI.getRawSource(), Src)) 2606 return false; 2607 unsigned Alignment = MTI.getAlignment(); 2608 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment)) 2609 return true; 2610 } 2611 } 2612 2613 if (!MTI.getLength()->getType()->isIntegerTy(32)) 2614 return false; 2615 2616 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) 2617 return false; 2618 2619 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove"; 2620 return SelectCall(&I, IntrMemName); 2621 } 2622 case Intrinsic::memset: { 2623 const MemSetInst &MSI = cast<MemSetInst>(I); 2624 // Don't handle volatile. 2625 if (MSI.isVolatile()) 2626 return false; 2627 2628 if (!MSI.getLength()->getType()->isIntegerTy(32)) 2629 return false; 2630 2631 if (MSI.getDestAddressSpace() > 255) 2632 return false; 2633 2634 return SelectCall(&I, "memset"); 2635 } 2636 case Intrinsic::trap: { 2637 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get( 2638 Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP)); 2639 return true; 2640 } 2641 } 2642 } 2643 2644 bool ARMFastISel::SelectTrunc(const Instruction *I) { 2645 // The high bits for a type smaller than the register size are assumed to be 2646 // undefined. 2647 Value *Op = I->getOperand(0); 2648 2649 EVT SrcVT, DestVT; 2650 SrcVT = TLI.getValueType(Op->getType(), true); 2651 DestVT = TLI.getValueType(I->getType(), true); 2652 2653 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 2654 return false; 2655 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 2656 return false; 2657 2658 unsigned SrcReg = getRegForValue(Op); 2659 if (!SrcReg) return false; 2660 2661 // Because the high bits are undefined, a truncate doesn't generate 2662 // any code. 2663 UpdateValueMap(I, SrcReg); 2664 return true; 2665 } 2666 2667 unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, 2668 bool isZExt) { 2669 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8) 2670 return 0; 2671 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1) 2672 return 0; 2673 2674 // Table of which combinations can be emitted as a single instruction, 2675 // and which will require two. 2676 static const uint8_t isSingleInstrTbl[3][2][2][2] = { 2677 // ARM Thumb 2678 // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops 2679 // ext: s z s z s z s z 2680 /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } }, 2681 /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }, 2682 /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } } 2683 }; 2684 2685 // Target registers for: 2686 // - For ARM can never be PC. 2687 // - For 16-bit Thumb are restricted to lower 8 registers. 2688 // - For 32-bit Thumb are restricted to non-SP and non-PC. 
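// RCTbl is indexed as RCTbl[isThumb2][isSingleInstr]: the two-instruction
// Thumb sequence uses 16-bit shift instructions and is therefore limited to
// the low registers (tGPR), while a single Thumb-2 extend may use any rGPR.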
2689 static const TargetRegisterClass *RCTbl[2][2] = { 2690 // Instructions: Two Single 2691 /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass }, 2692 /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass } 2693 }; 2694 2695 // Table governing the instruction(s) to be emitted. 2696 static const struct InstructionTable { 2697 uint32_t Opc : 16; 2698 uint32_t hasS : 1; // Some instructions have an S bit, always set it to 0. 2699 uint32_t Shift : 7; // For shift operand addressing mode, used by MOVsi. 2700 uint32_t Imm : 8; // All instructions have either a shift or a mask. 2701 } IT[2][2][3][2] = { 2702 { // Two instructions (first is left shift, second is in this table). 2703 { // ARM Opc S Shift Imm 2704 /* 1 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 31 }, 2705 /* 1 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 31 } }, 2706 /* 8 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 24 }, 2707 /* 8 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 24 } }, 2708 /* 16 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 16 }, 2709 /* 16 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 16 } } 2710 }, 2711 { // Thumb Opc S Shift Imm 2712 /* 1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 31 }, 2713 /* 1 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } }, 2714 /* 8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 24 }, 2715 /* 8 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } }, 2716 /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 16 }, 2717 /* 16 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } } 2718 } 2719 }, 2720 { // Single instruction. 2721 { // ARM Opc S Shift Imm 2722 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, 2723 /* 1 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 1 } }, 2724 /* 8 bit sext */ { { ARM::SXTB , 0, ARM_AM::no_shift, 0 }, 2725 /* 8 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 255 } }, 2726 /* 16 bit sext */ { { ARM::SXTH , 0, ARM_AM::no_shift, 0 }, 2727 /* 16 bit zext */ { ARM::UXTH , 0, ARM_AM::no_shift, 0 } } 2728 }, 2729 { // Thumb Opc S Shift Imm 2730 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, 2731 /* 1 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 1 } }, 2732 /* 8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift, 0 }, 2733 /* 8 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } }, 2734 /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift, 0 }, 2735 /* 16 bit zext */ { ARM::t2UXTH , 0, ARM_AM::no_shift, 0 } } 2736 } 2737 } 2738 }; 2739 2740 unsigned SrcBits = SrcVT.getSizeInBits(); 2741 unsigned DestBits = DestVT.getSizeInBits(); 2742 (void) DestBits; 2743 assert((SrcBits < DestBits) && "can only extend to larger types"); 2744 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) && 2745 "other sizes unimplemented"); 2746 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) && 2747 "other sizes unimplemented"); 2748 2749 bool hasV6Ops = Subtarget->hasV6Ops(); 2750 unsigned Bitness = SrcBits / 8; // {1,8,16}=>{0,1,2} 2751 assert((Bitness < 3) && "sanity-check table bounds"); 2752 2753 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt]; 2754 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr]; 2755 const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt]; 2756 unsigned Opc = ITP->Opc; 2757 assert(ARM::KILL != Opc && "Invalid table entry"); 2758 unsigned hasS = ITP->hasS; 2759 ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift; 2760 assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) && 2761 "only MOVsi has shift operand addressing mode"); 
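// Example (sketch, virtual registers shown as r0..r2): sign-extending an i8
// in ARM mode without v6 ops takes the two-instruction row, roughly
//   mov r1, r0, lsl #24
//   mov r2, r1, asr #24
// whereas with v6 ops a single SXTB is emitted instead.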
2762 unsigned Imm = ITP->Imm; 2763 2764 // 16-bit Thumb instructions always set CPSR (unless they're in an IT block). 2765 bool setsCPSR = &ARM::tGPRRegClass == RC; 2766 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi; 2767 unsigned ResultReg; 2768 // MOVsi encodes shift and immediate in shift operand addressing mode. 2769 // The following condition has the same value when emitting two 2770 // instruction sequences: both are shifts. 2771 bool ImmIsSO = (Shift != ARM_AM::no_shift); 2772 2773 // Either one or two instructions are emitted. 2774 // They're always of the form: 2775 // dst = in OP imm 2776 // CPSR is set only by 16-bit Thumb instructions. 2777 // Predicate, if any, is AL. 2778 // S bit, if available, is always 0. 2779 // When two are emitted the first's result will feed as the second's input, 2780 // that value is then dead. 2781 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2; 2782 for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) { 2783 ResultReg = createResultReg(RC); 2784 bool isLsl = (0 == Instr) && !isSingleInstr; 2785 unsigned Opcode = isLsl ? LSLOpc : Opc; 2786 ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift; 2787 unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm; 2788 bool isKill = 1 == Instr; 2789 MachineInstrBuilder MIB = BuildMI( 2790 *FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opcode), ResultReg); 2791 if (setsCPSR) 2792 MIB.addReg(ARM::CPSR, RegState::Define); 2793 SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR); 2794 AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(ImmEnc)); 2795 if (hasS) 2796 AddDefaultCC(MIB); 2797 // Second instruction consumes the first's result. 2798 SrcReg = ResultReg; 2799 } 2800 2801 return ResultReg; 2802 } 2803 2804 bool ARMFastISel::SelectIntExt(const Instruction *I) { 2805 // On ARM, in general, integer casts don't involve legal types; this code 2806 // handles promotable integers. 2807 Type *DestTy = I->getType(); 2808 Value *Src = I->getOperand(0); 2809 Type *SrcTy = Src->getType(); 2810 2811 bool isZExt = isa<ZExtInst>(I); 2812 unsigned SrcReg = getRegForValue(Src); 2813 if (!SrcReg) return false; 2814 2815 EVT SrcEVT, DestEVT; 2816 SrcEVT = TLI.getValueType(SrcTy, true); 2817 DestEVT = TLI.getValueType(DestTy, true); 2818 if (!SrcEVT.isSimple()) return false; 2819 if (!DestEVT.isSimple()) return false; 2820 2821 MVT SrcVT = SrcEVT.getSimpleVT(); 2822 MVT DestVT = DestEVT.getSimpleVT(); 2823 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt); 2824 if (ResultReg == 0) return false; 2825 UpdateValueMap(I, ResultReg); 2826 return true; 2827 } 2828 2829 bool ARMFastISel::SelectShift(const Instruction *I, 2830 ARM_AM::ShiftOpc ShiftTy) { 2831 // We handle thumb2 mode by target independent selector 2832 // or SelectionDAG ISel. 2833 if (isThumb2) 2834 return false; 2835 2836 // Only handle i32 now. 2837 EVT DestVT = TLI.getValueType(I->getType(), true); 2838 if (DestVT != MVT::i32) 2839 return false; 2840 2841 unsigned Opc = ARM::MOVsr; 2842 unsigned ShiftImm; 2843 Value *Src2Value = I->getOperand(1); 2844 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) { 2845 ShiftImm = CI->getZExtValue(); 2846 2847 // Fall back to selection DAG isel if the shift amount 2848 // is zero or greater than the width of the value type. 
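// (In the ARM shifter-operand encoding an immediate of 0 is special; for
// example, lsr/asr #0 actually encodes a shift by 32. Such amounts are punted
// to SelectionDAG rather than encoded here.)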
2849 if (ShiftImm == 0 || ShiftImm >=32) 2850 return false; 2851 2852 Opc = ARM::MOVsi; 2853 } 2854 2855 Value *Src1Value = I->getOperand(0); 2856 unsigned Reg1 = getRegForValue(Src1Value); 2857 if (Reg1 == 0) return false; 2858 2859 unsigned Reg2 = 0; 2860 if (Opc == ARM::MOVsr) { 2861 Reg2 = getRegForValue(Src2Value); 2862 if (Reg2 == 0) return false; 2863 } 2864 2865 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); 2866 if(ResultReg == 0) return false; 2867 2868 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2869 TII.get(Opc), ResultReg) 2870 .addReg(Reg1); 2871 2872 if (Opc == ARM::MOVsi) 2873 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm)); 2874 else if (Opc == ARM::MOVsr) { 2875 MIB.addReg(Reg2); 2876 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0)); 2877 } 2878 2879 AddOptionalDefs(MIB); 2880 UpdateValueMap(I, ResultReg); 2881 return true; 2882 } 2883 2884 // TODO: SoftFP support. 2885 bool ARMFastISel::TargetSelectInstruction(const Instruction *I) { 2886 2887 switch (I->getOpcode()) { 2888 case Instruction::Load: 2889 return SelectLoad(I); 2890 case Instruction::Store: 2891 return SelectStore(I); 2892 case Instruction::Br: 2893 return SelectBranch(I); 2894 case Instruction::IndirectBr: 2895 return SelectIndirectBr(I); 2896 case Instruction::ICmp: 2897 case Instruction::FCmp: 2898 return SelectCmp(I); 2899 case Instruction::FPExt: 2900 return SelectFPExt(I); 2901 case Instruction::FPTrunc: 2902 return SelectFPTrunc(I); 2903 case Instruction::SIToFP: 2904 return SelectIToFP(I, /*isSigned*/ true); 2905 case Instruction::UIToFP: 2906 return SelectIToFP(I, /*isSigned*/ false); 2907 case Instruction::FPToSI: 2908 return SelectFPToI(I, /*isSigned*/ true); 2909 case Instruction::FPToUI: 2910 return SelectFPToI(I, /*isSigned*/ false); 2911 case Instruction::Add: 2912 return SelectBinaryIntOp(I, ISD::ADD); 2913 case Instruction::Or: 2914 return SelectBinaryIntOp(I, ISD::OR); 2915 case Instruction::Sub: 2916 return SelectBinaryIntOp(I, ISD::SUB); 2917 case Instruction::FAdd: 2918 return SelectBinaryFPOp(I, ISD::FADD); 2919 case Instruction::FSub: 2920 return SelectBinaryFPOp(I, ISD::FSUB); 2921 case Instruction::FMul: 2922 return SelectBinaryFPOp(I, ISD::FMUL); 2923 case Instruction::SDiv: 2924 return SelectDiv(I, /*isSigned*/ true); 2925 case Instruction::UDiv: 2926 return SelectDiv(I, /*isSigned*/ false); 2927 case Instruction::SRem: 2928 return SelectRem(I, /*isSigned*/ true); 2929 case Instruction::URem: 2930 return SelectRem(I, /*isSigned*/ false); 2931 case Instruction::Call: 2932 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2933 return SelectIntrinsicCall(*II); 2934 return SelectCall(I); 2935 case Instruction::Select: 2936 return SelectSelect(I); 2937 case Instruction::Ret: 2938 return SelectRet(I); 2939 case Instruction::Trunc: 2940 return SelectTrunc(I); 2941 case Instruction::ZExt: 2942 case Instruction::SExt: 2943 return SelectIntExt(I); 2944 case Instruction::Shl: 2945 return SelectShift(I, ARM_AM::lsl); 2946 case Instruction::LShr: 2947 return SelectShift(I, ARM_AM::lsr); 2948 case Instruction::AShr: 2949 return SelectShift(I, ARM_AM::asr); 2950 default: break; 2951 } 2952 return false; 2953 } 2954 2955 namespace { 2956 // This table describes sign- and zero-extend instructions which can be 2957 // folded into a preceding load. All of these extends have an immediate 2958 // (sometimes a mask and sometimes a shift) that's applied after 2959 // extension. 
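// ExpectedImm is that trailing immediate: 0 for the SXT/UXT forms (no
// rotation) and 255 for the AND-with-mask form of an i8 zero-extend.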
2960 const struct FoldableLoadExtendsStruct { 2961 uint16_t Opc[2]; // ARM, Thumb. 2962 uint8_t ExpectedImm; 2963 uint8_t isZExt : 1; 2964 uint8_t ExpectedVT : 7; 2965 } FoldableLoadExtends[] = { 2966 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 }, 2967 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 }, 2968 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 }, 2969 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 }, 2970 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 } 2971 }; 2972 } 2973 2974 /// \brief The specified machine instr operand is a vreg, and that 2975 /// vreg is being provided by the specified load instruction. If possible, 2976 /// try to fold the load as an operand to the instruction, returning true if 2977 /// successful. 2978 bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, 2979 const LoadInst *LI) { 2980 // Verify we have a legal type before going any further. 2981 MVT VT; 2982 if (!isLoadTypeLegal(LI->getType(), VT)) 2983 return false; 2984 2985 // Combine load followed by zero- or sign-extend. 2986 // ldrb r1, [r0] ldrb r1, [r0] 2987 // uxtb r2, r1 => 2988 // mov r3, r2 mov r3, r1 2989 if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm()) 2990 return false; 2991 const uint64_t Imm = MI->getOperand(2).getImm(); 2992 2993 bool Found = false; 2994 bool isZExt; 2995 for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends); 2996 i != e; ++i) { 2997 if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() && 2998 (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm && 2999 MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) { 3000 Found = true; 3001 isZExt = FoldableLoadExtends[i].isZExt; 3002 } 3003 } 3004 if (!Found) return false; 3005 3006 // See if we can handle this address. 3007 Address Addr; 3008 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false; 3009 3010 unsigned ResultReg = MI->getOperand(0).getReg(); 3011 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false)) 3012 return false; 3013 MI->eraseFromParent(); 3014 return true; 3015 } 3016 3017 unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, 3018 unsigned Align, MVT VT) { 3019 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 3020 ARMConstantPoolConstant *CPV = 3021 ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 3022 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); 3023 3024 unsigned Opc; 3025 unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT)); 3026 // Load value. 3027 if (isThumb2) { 3028 DestReg1 = constrainOperandRegClass(TII.get(ARM::t2LDRpci), DestReg1, 0); 3029 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 3030 TII.get(ARM::t2LDRpci), DestReg1) 3031 .addConstantPoolIndex(Idx)); 3032 Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs; 3033 } else { 3034 // The extra immediate is for addrmode2. 3035 DestReg1 = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg1, 0); 3036 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 3037 DL, TII.get(ARM::LDRcp), DestReg1) 3038 .addConstantPoolIndex(Idx).addImm(0)); 3039 Opc = UseGOTOFF ? 
ARM::ADDrr : ARM::LDRrs; 3040 } 3041 3042 unsigned GlobalBaseReg = AFI->getGlobalBaseReg(); 3043 if (GlobalBaseReg == 0) { 3044 GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT)); 3045 AFI->setGlobalBaseReg(GlobalBaseReg); 3046 } 3047 3048 unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT)); 3049 DestReg2 = constrainOperandRegClass(TII.get(Opc), DestReg2, 0); 3050 DestReg1 = constrainOperandRegClass(TII.get(Opc), DestReg1, 1); 3051 GlobalBaseReg = constrainOperandRegClass(TII.get(Opc), GlobalBaseReg, 2); 3052 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 3053 DL, TII.get(Opc), DestReg2) 3054 .addReg(DestReg1) 3055 .addReg(GlobalBaseReg); 3056 if (!UseGOTOFF) 3057 MIB.addImm(0); 3058 AddOptionalDefs(MIB); 3059 3060 return DestReg2; 3061 } 3062 3063 bool ARMFastISel::FastLowerArguments() { 3064 if (!FuncInfo.CanLowerReturn) 3065 return false; 3066 3067 const Function *F = FuncInfo.Fn; 3068 if (F->isVarArg()) 3069 return false; 3070 3071 CallingConv::ID CC = F->getCallingConv(); 3072 switch (CC) { 3073 default: 3074 return false; 3075 case CallingConv::Fast: 3076 case CallingConv::C: 3077 case CallingConv::ARM_AAPCS_VFP: 3078 case CallingConv::ARM_AAPCS: 3079 case CallingConv::ARM_APCS: 3080 break; 3081 } 3082 3083 // Only handle simple cases. i.e. Up to 4 i8/i16/i32 scalar arguments 3084 // which are passed in r0 - r3. 3085 unsigned Idx = 1; 3086 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 3087 I != E; ++I, ++Idx) { 3088 if (Idx > 4) 3089 return false; 3090 3091 if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) || 3092 F->getAttributes().hasAttribute(Idx, Attribute::StructRet) || 3093 F->getAttributes().hasAttribute(Idx, Attribute::ByVal)) 3094 return false; 3095 3096 Type *ArgTy = I->getType(); 3097 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) 3098 return false; 3099 3100 EVT ArgVT = TLI.getValueType(ArgTy); 3101 if (!ArgVT.isSimple()) return false; 3102 switch (ArgVT.getSimpleVT().SimpleTy) { 3103 case MVT::i8: 3104 case MVT::i16: 3105 case MVT::i32: 3106 break; 3107 default: 3108 return false; 3109 } 3110 } 3111 3112 3113 static const uint16_t GPRArgRegs[] = { 3114 ARM::R0, ARM::R1, ARM::R2, ARM::R3 3115 }; 3116 3117 const TargetRegisterClass *RC = &ARM::rGPRRegClass; 3118 Idx = 0; 3119 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 3120 I != E; ++I, ++Idx) { 3121 unsigned SrcReg = GPRArgRegs[Idx]; 3122 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC); 3123 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy. 3124 // Without this, EmitLiveInCopies may eliminate the livein if its only 3125 // use is a bitcast (which isn't turned into an instruction). 3126 unsigned ResultReg = createResultReg(RC); 3127 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 3128 ResultReg).addReg(DstReg, getKillRegState(true)); 3129 UpdateValueMap(I, ResultReg); 3130 } 3131 3132 return true; 3133 } 3134 3135 namespace llvm { 3136 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo, 3137 const TargetLibraryInfo *libInfo) { 3138 const TargetMachine &TM = funcInfo.MF->getTarget(); 3139 3140 const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>(); 3141 // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl. 
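// That is: Thumb-1 functions never use FastISel, Thumb-2 functions use it
// only on iOS, and ARM-mode functions use it on iOS, Linux and NaCl; every
// other configuration returns 0 and falls back to SelectionDAG.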
3142 bool UseFastISel = false;
3143 UseFastISel |= Subtarget->isTargetIOS() && !Subtarget->isThumb1Only();
3144 UseFastISel |= Subtarget->isTargetLinux() && !Subtarget->isThumb();
3145 UseFastISel |= Subtarget->isTargetNaCl() && !Subtarget->isThumb();
3146
3147 if (UseFastISel) {
3148 // iOS always has a FP for backtracking; force other targets
3149 // to keep their FP when doing FastISel. The emitted code is
3150 // currently superior, and in cases like test-suite's lencod,
3151 // FastISel isn't quite correct when FP is eliminated.
3152 TM.Options.NoFramePointerElim = true;
3153 return new ARMFastISel(funcInfo, libInfo);
3154 }
3155 return 0;
3156 }
3157 }
3158