//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
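    // A note on naming: these overrides follow FastISel's suffix convention,
    // where the letters after FastEmitInst_ describe the operand kinds, e.g.
    // _r = one register, _rr = two registers, _ri = register + immediate,
    // _rf = register + FP immediate, _i/_ii = immediate-only forms.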
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);

    #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, bool isZExt,
                     bool allocReg);

    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(const GlobalValue *GV);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // Thumb2 functions, and instructions outside the NEON domain, were already
  // handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
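  // Rough sketch of the result (label name illustrative): once the constant
  // islands pass lays out the pool this becomes a PC-relative
  // 'vldr s0, .LCPI0_0' (or 'vldr d0, ...' for f64); the zero operand below
  // just fills addrmode5's register slot.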
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool. For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                              ARMCP::CPValue,
                                                              PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb2) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
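    // Sketch (label name illustrative): 'ldr rN, .LCPI0_0', where the pool
    // entry holds the global's address; the zero immediate fills addrmode2's
    // offset field.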
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                   == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Addr.Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
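  // Any constant offset accumulated above stays in Addr.Offset; later,
  // ARMSimplifyAddress either folds it into the addressing mode or emits an
  // explicit add when it doesn't fit.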
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Unhandled load/store type!");
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass :
                                         ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                              bool isZExt = true, bool allocReg = true) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  bool useAM3 = false;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::f32:
      Opc = ARM::VLDRS;
      RC = TLI.getRegClassFor(VT);
      break;
    case MVT::f64:
      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass :
                                                ARM::GPRRegisterClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRS;
      // Unaligned stores need special handling.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling.
      if (Alignment && Alignment < 8) {
        return false;
      }
      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
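      // (FCMP_ONE, for example, is OGT || OLT, so it would need two compares
      // and two predicated branches; this fast path simply bails instead.)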
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      if (Imm < 0) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
      // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16; we need to either zero extend or sign extend.
  if (needsExt) {
    unsigned ResultReg;
    ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (ResultReg == 0) return false;
    SrcReg1 = ResultReg;
    if (!UseImm) {
      ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (ResultReg == 0) return false;
      SrcReg2 = ResultReg;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  Type *Ty = CI->getOperand(0)->getType();

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
                                     : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, /*isZExt*/ false);
    if (ResultReg == 0) return false;
    SrcReg = ResultReg;
  }

  // The conversion routine works on fp-reg to fp-reg; the operand above was
  // an integer, so move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
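  // Roughly, the sequence built below is 'cmp CondReg, #0' followed by a
  // predicated MOVCC that picks between the two select operands (or a
  // MOVCCi/MVNCCi form when operand 2 is an encodable immediate).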
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
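  // For example, a scalar 'fadd float' IR instruction ends up as a single
  // VADDS here (VADDD for double), operating entirely in the VFP registers.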
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
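    // For example (informal): an i8 argument assigned to an i32 location is
    // widened here -- SExt becomes sxtb/t2SXTB, ZExt (and AExt) becomes
    // uxtb/t2UXTB via ARMEmitIntExt -- before the value is copied into its
    // argument register or stored to its outgoing stack slot.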
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                           /*isZExt*/false);
        assert(ResultReg != 0 && "Failed to emit a sext");
        Arg = ResultReg;
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through. Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                           /*isZExt*/true);
        assert(ResultReg != 0 && "Failed to emit a zext");
        Arg = ResultReg;
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass *DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      TargetRegisterClass *DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      bool isZExt = Outs[0].Flags.isZExt();
      unsigned ResultReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, isZExt);
      if (ResultReg == 0) return false;
      SrcReg = ResultReg;
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
  // Darwin needs the r9 versions of the opcodes.
  bool isDarwin = Subtarget->isTargetDarwin();
  if (isThumb2) {
    return isDarwin ? ARM::tBLr9 : ARM::tBL;
  } else {
    return isDarwin ? ARM::BLr9 : ARM::BL;
  }
}

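// Informal sketch of the libcall path below: an i32 'sdiv' on a subtarget
// without hardware divide reaches ARMEmitLibcall via SelectSDiv, which
// marshals both i32 operands with ProcessCallArgs and emits a BL/BLr9 to the
// symbol returned by TLI.getLibcallName(RTLIB::SDIV_I32) (e.g. "__divsi3"
// with the default libcall names, or "__aeabi_idiv" on AEABI targets), after
// which FinishCall copies the result back out of R0.
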
// A quick function that will emit a call for a named libcall with the vector
// of passed arguments for the Instruction in I. We can assume that we can
// emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for Darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL);
  if (isThumb2)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
                         .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

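  // Informal summary of the checks below: only direct calls to a known
  // GlobalValue with "simple" argument and return types are selected here;
  // inline asm, varargs, indirect calls through a computed pointer, and
  // parameters marked inreg/sret/nest/byval all return false so that
  // SelectionDAG can handle them instead.
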
  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying
    // function.
    if (IntrMemName && e-i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for Darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
  if (isThumb2) {
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)));
    if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);
  } else {
    if (!IntrMemName)
      // Explicitly adding the predicate here.
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
                           .addGlobalAddress(GV, 0, 0));
    else
      // Explicitly adding the predicate here.
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
                           .addExternalSymbol(IntrMemName, 0));
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV == true && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV == true && "Should be able to handle this store.");
    (void)RV;  // Keep RV referenced when asserts are disabled.

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  }
  return false;
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  // The UXT/SXT forms take a rotate amount (0 here); the bool zext uses an
  // AND, so its immediate is the mask value 1.
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

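// Informal examples of the above: 'zext i8 %b to i32' becomes uxtb (t2UXTB in
// Thumb2 functions), 'sext i16 %h to i32' becomes sxth, and 'zext i1 %c to
// i32' is emitted as an AND with 1, per the opcode table in ARMEmitIntExt.
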
// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Load:
    return SelectLoad(I);
  case Instruction::Store:
    return SelectStore(I);
  case Instruction::Br:
    return SelectBranch(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return SelectCmp(I);
  case Instruction::FPExt:
    return SelectFPExt(I);
  case Instruction::FPTrunc:
    return SelectFPTrunc(I);
  case Instruction::SIToFP:
    return SelectSIToFP(I);
  case Instruction::FPToSI:
    return SelectFPToSI(I);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::FSub:
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectSDiv(I);
  case Instruction::SRem:
    return SelectSRem(I);
  case Instruction::Call:
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      return SelectIntrinsicCall(*II);
    return SelectCall(I);
  case Instruction::Select:
    return SelectSelect(I);
  case Instruction::Ret:
    return SelectRet(I);
  case Instruction::Trunc:
    return SelectTrunc(I);
  case Instruction::ZExt:
  case Instruction::SExt:
    return SelectIntExt(I);
  default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine a load followed by a zero- or sign-extend, e.g.:
  //   ldrb r1, [r0]           ldrb r1, [r0]
  //   uxtb r2, r1        =>
  //   mov  r3, r2             mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
  default: return false;
  case ARM::SXTH:
  case ARM::t2SXTH:
    isZExt = false;
    // Fall through.
  case ARM::UXTH:
  case ARM::t2UXTH:
    if (VT != MVT::i16)
      return false;
    break;
  case ARM::SXTB:
  case ARM::t2SXTB:
    isZExt = false;
    // Fall through.
  case ARM::UXTB:
  case ARM::t2UXTB:
    if (VT != MVT::i8)
      return false;
    break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only for now (and not Thumb1).
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}