//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

extern cl::opt<bool> EnableARMLongCalls;

namespace {

// All possible address modes, plus some.
typedef struct Address {
  enum {
    RegBase,
    FrameIndexBase
  } BaseType;

  union {
    unsigned Reg;
    int FI;
  } Base;

  int Offset;

  // Innocuous defaults for our address.
  Address() : BaseType(RegBase), Offset(0) {
    Base.Reg = 0;
  }
} Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
      : FastISel(funcInfo, libInfo),
        TM(funcInfo.MF->getTarget()),
        TII(*TM.getInstrInfo()),
        TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
  private:
    unsigned FastEmitInst_(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC);
    unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);
    unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              unsigned Op2, bool Op2IsKill);
    unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             const ConstantFP *FPImm);
    unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              uint64_t Imm);
    unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);
    unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             uint64_t Imm1, uint64_t Imm2);

    unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                        unsigned Op0, bool Op0IsKill,
                                        uint32_t Idx);

    // Backend specific FastISel code.
  private:
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);
  private:
#include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);
    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, EVT VT);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 function or this isn't a NEON instruction, we were
  // handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for a description of why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
      &ARM::GPRRegClass;
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();
  bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);

  // Use movw+movt when possible, it avoids constant pool entries.
  // Darwin targets don't support movt with Reloc::Static, see
  // ARMTargetLowering::LowerGlobalAddressDarwin.  Other targets only support
  // static movt relocations.
  if (Subtarget->useMovt() &&
      Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
    unsigned Opc;
    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    case Reloc::DynamicNoPIC:
      Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
        .addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                    DestReg)
        .addConstantPoolIndex(Idx)
        .addImm(0);
      AddOptionalDefs(MIB);

      if (RelocM == Reloc::PIC_) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));

        MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                          DL, TII.get(Opc), NewDestReg)
          .addReg(DestReg)
          .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if (IsIndirect) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
        .addReg(DestReg)
        .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
        .addReg(DestReg)
        .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                 == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ?
      (const TargetRegisterClass*)&ARM::tGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
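  // (For example, an f32 at byte offset 8 from its base ends up with an
  // encoded offset of 2 after the division below.)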
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = &ARM::GPRRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load.  Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ?
        (const TargetRegisterClass*)&ARM::tGPRRegClass :
        (const TargetRegisterClass*)&ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
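      // If under-aligned, move the value into a core register with VMOVRS and
      // emit an integer word store instead (mirroring the unaligned-load path).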
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);

  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(ARM::CPSR);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer; move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss, go ahead and return false; we'll pick it up
  // later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to do so. Just use the VFP
  // instructions if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
                                           bool Return,
                                           bool isVarArg) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use the VFP variant of the calling
      // convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fall through to the soft-float variant; variadic functions don't
    // use the hard floating point ABI.
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    if (Return)
      llvm_unreachable("Can't return in GHC call convention");
    else
      return CC_ARM_APCS_GHC;
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes,
                                  bool isVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      continue;
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64 ||
          // TODO: Only handle register args for now.
          !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    } else {
      switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
      default:
        return false;
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
        break;
      case MVT::f32:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      case MVT::f64:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      }
    }
  }

  // At this point, we are able to handle the call's arguments in fast isel.

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
        assert(Arg != 0 && "Failed to emit a sext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through. Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
        assert(Arg != 0 && "Failed to emit a zext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");

      CCValAssign &NextVA = ArgLocs[++i];

      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr);
      (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }

  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
                                                 F.isVarArg()));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext. Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? ARM::tBLXr : ARM::BLX;
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
                                       GlobalValue::ExternalLinkage, 0, Name);
  return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
}

// A quick function that will emit a call for a named libcall, passing the
// operands of the Instruction I as the call's arguments. We can assume that
// we can emit a call for any libcall we can produce. This is an abridged
// version of the full call infrastructure since we won't need to worry about
// things like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;

  unsigned CalleeReg = 0;
  if (EnableARMLongCalls) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));
  // BL / BLX don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    AddDefaultPred(MIB);
  if (EnableARMLongCalls)
    MIB.addReg(CalleeReg);
  else
    MIB.addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  bool isVarArg = FTy->isVarArg();

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e-i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attributes::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attributes::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
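    // Bail out on arguments that need non-trivial lowering: inreg, sret,
    // nest, and byval parameters are not supported here.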
    if (CS.paramHasAttr(AttrInd, Attributes::InReg) ||
        CS.paramHasAttr(AttrInd, Attributes::StructRet) ||
        CS.paramHasAttr(AttrInd, Attributes::Nest) ||
        CS.paramHasAttr(AttrInd, Attributes::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || EnableARMLongCalls) UseReg = true;

  unsigned CalleeReg = 0;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));

  // ARM calls don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    AddDefaultPred(MIB);
  if (UseReg)
    MIB.addReg(CalleeReg);
  else if (!IntrMemName)
    MIB.addGlobalAddress(GV, 0, 0);
  else
    MIB.addExternalSymbol(IntrMemName, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
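  // Copy the data in the widest integer chunks we can: 4 bytes, then 2,
  // then 1.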
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV == true && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV == true && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
    MFI->setFrameAddressIsTaken(true);

    unsigned LdrOpc;
    const TargetRegisterClass *RC;
    if (isThumb2) {
      LdrOpc = ARM::t2LDRi12;
      RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
    } else {
      LdrOpc = ARM::LDRi12;
      RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
    }

    const ARMBaseRegisterInfo *RegInfo =
      static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    unsigned SrcReg = FramePtr;

    // Recursively load frame address
    // ldr r0 [fp]
    // ldr r0 [r0]
    // ldr r0 [r0]
    // ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(LdrOpc), DestReg)
                      .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    UpdateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::TRAP));
    return true;
  }
  }
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::i32);
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    } else {
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    }
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
  // Thumb2 mode is handled by the target-independent selector or by
  // SelectionDAG ISel.
  if (isThumb2)
    return false;

  // Only handle i32 now.
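  // A constant shift amount is encoded into a MOVsi; otherwise the shift
  // amount comes from a register via MOVsr.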
  EVT DestVT = TLI.getValueType(I->getType(), true);
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
  unsigned ShiftImm;
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    // Fall back to selection DAG isel if the shift amount
    // is zero or greater than the width of the value type.
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;

    Opc = ARM::MOVsi;
  }

  Value *Src1Value = I->getOperand(0);
  unsigned Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  unsigned Reg2 = 0;
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  if (ResultReg == 0) return false;

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg)
                            .addReg(Reg1);

  if (Opc == ARM::MOVsi)
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
  else if (Opc == ARM::MOVsr) {
    MIB.addReg(Reg2);
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
  }

  AddOptionalDefs(MIB);
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {

  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    case Instruction::Shl:
      return SelectShift(I, ARM_AM::lsl);
    case Instruction::LShr:
      return SelectShift(I, ARM_AM::lsr);
    case Instruction::AShr:
      return SelectShift(I, ARM_AM::asr);
    default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  // ldrb r1, [r0]       ldrb r1, [r0]
  // uxtb r2, r1     =>
  // mov  r3, r2         mov r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
                                     unsigned Align, EVT VT) {
  bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
  ARMConstantPoolConstant *CPV =
    ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  unsigned Opc;
  unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT));
  // Load value.
  if (isThumb2) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg1)
                    .addConstantPoolIndex(Idx));
    Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs;
  } else {
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                            DL, TII.get(ARM::LDRcp), DestReg1)
                    .addConstantPoolIndex(Idx).addImm(0));
    Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs;
  }

  unsigned GlobalBaseReg = AFI->getGlobalBaseReg();
  if (GlobalBaseReg == 0) {
    GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT));
    AFI->setGlobalBaseReg(GlobalBaseReg);
  }

  unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT));
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(Opc), DestReg2)
                            .addReg(DestReg1)
                            .addReg(GlobalBaseReg);
  if (!UseGOTOFF)
    MIB.addImm(0);
  AddOptionalDefs(MIB);

  return DestReg2;
}

namespace llvm {
  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {
    // Completely untested on non-iOS.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // iOS only for now; Thumb1 is not supported.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only())
      return new ARMFastISel(funcInfo, libInfo);
    return 0;
  }
}