//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
      : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
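    // Note: these overrides mirror the generic FastISel emitters; each one
    // builds the requested MachineInstr and then routes it through
    // AddOptionalDefs so the default ARM predicate and, where needed, the
    // optional CC/CPSR operand get appended.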
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(const GlobalValue *GV);

    // Call handling routines.
  private:
    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                        unsigned &ResultReg);
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we were handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyways.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                              ARMCP::CPValue,
                                                              PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb2) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return false;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                 == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Addr.Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Unhandled load/store type!");
    case MVT::i16:
      if (isThumb2)
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
      else
        // ARM i16 integer loads/stores handle +/-imm8 offsets.
        // FIXME: Negative offsets require special handling.
        if (Addr.Offset > 255 || Addr.Offset < 0)
          needsLowering = true;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
      // Integer loads/stores handle 12-bit offsets.
      needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass :
                                         ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
                              MachinePointerInfo::getFixedStack(FI, Offset),
                              Flags,
                              MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb2 && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb2 && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      Opc = isThumb2 ? ARM::t2LDRBi12 : ARM::LDRBi12;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i16:
      Opc = isThumb2 ? ARM::t2LDRHi12 : ARM::LDRH;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i32:
      Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::f32:
      Opc = ARM::VLDRS;
      RC = TLI.getRegClassFor(VT);
      break;
    case MVT::f64:
      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad);
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
  unsigned StrOpc;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass :
                                                ARM::GPRRegisterClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      StrOpc = isThumb2 ? ARM::t2STRBi12 : ARM::STRBi12;
      break;
    case MVT::i16:
      StrOpc = isThumb2 ? ARM::t2STRHi12 : ARM::STRH;
      break;
    case MVT::i32:
      StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRS;
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg, getKillRegState(true));
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
                  .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      if (Imm < 0) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    unsigned ResultReg;
    ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (ResultReg == 0) return false;
    SrcReg1 = ResultReg;
    if (!UseImm) {
      ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (ResultReg == 0) return false;
      SrcReg2 = ResultReg;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  Type *Ty = CI->getOperand(0)->getType();

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
                                     : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
          .addReg(ZeroReg).addImm(1)
          .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, /*isZExt*/ false);
    if (ResultReg == 0) return false;
    SrcReg = ResultReg;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert (VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      EVT DestVT = VA.getLocVT();
      unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                         /*isZExt*/false);
      assert(ResultReg != 0 && "Failed to emit a sext");
      Arg = ResultReg;
      break;
    }
    case CCValAssign::AExt:
      // Intentional fall-through. Handle AExt and ZExt.
    case CCValAssign::ZExt: {
      EVT DestVT = VA.getLocVT();
      unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                         /*isZExt*/true);
      assert(ResultReg != 0 && "Failed to emit a zext");
      Arg = ResultReg;
      break;
    }
    case CCValAssign::BCvt: {
      unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                               /*TODO: Kill=*/false);
      assert(BC != 0 && "Failed to emit a bitcast!");
      Arg = BC;
      ArgVT = VA.getLocVT();
      break;
    }
    default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
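      // Sketch (assuming a soft-float style return): the two halves of the
      // f64 come back in a GPR pair (e.g. R0/R1), and the VMOVDRR below
      // reassembles them into a single D register.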
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      bool isZExt = Outs[0].Flags.isZExt();
      unsigned ResultReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, isZExt);
      if (ResultReg == 0) return false;
      SrcReg = ResultReg;
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
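    // Recording the live-out is what keeps the copy above from looking dead
    // to later passes, since the BX_RET / tBX_RET emitted below does not list
    // the return register as an explicit use.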
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {

  // Darwin needs the r9 versions of the opcodes.
  bool isDarwin = Subtarget->isTargetDarwin();
  if (isThumb2) {
    return isDarwin ? ARM::tBLr9 : ARM::tBL;
  } else {
    return isDarwin ? ARM::BLr9 : ARM::BL;
  }
}

// A quick function that will emit a call to the named libcall, passing the
// arguments of the Instruction I. We can assume that we can emit a call for
// any libcall we can produce. This is an abridged version of the full call
// infrastructure since we won't need to worry about things like computed
// function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL);
  if (isThumb2)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
          .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
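  // FinishCall below pops the call frame (CALLSEQ_END) and copies the result
  // out of the return register(s); UsedRegs records which physregs the call
  // actually defines so every other implicit def can be marked dead.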
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);

    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
  if (isThumb2)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addGlobalAddress(GV, 0, 0);
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
          .addGlobalAddress(GV, 0, 0));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  // FIXME: We could save an instruction in many cases by special-casing
  // load instructions.
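  // Example (a sketch): "zext i8 %x to i32" goes through ARMEmitIntExt above
  // and becomes a UXTB (t2UXTB in Thumb2), "zext i1" is materialized as an
  // AND with 1, and the signed extends use SXTB/SXTH.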
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {

  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only (and not Thumb1) for now.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}
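// Usage note (a sketch, not part of this file's contract): the FastISel
// object created above is driven by the common instruction-selection
// machinery, which calls TargetSelectInstruction for each IR instruction and
// falls back to full SelectionDAG selection whenever a Select* routine here
// returns false.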