1 //===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines the ARM-specific support for the FastISel class. Some 11 // of the target-specific code is generated by tablegen in the file 12 // ARMGenFastISel.inc, which is #included here. 13 // 14 //===----------------------------------------------------------------------===// 15 16 #include "ARM.h" 17 #include "ARMBaseInstrInfo.h" 18 #include "ARMCallingConv.h" 19 #include "ARMConstantPoolValue.h" 20 #include "ARMSubtarget.h" 21 #include "ARMTargetMachine.h" 22 #include "MCTargetDesc/ARMAddressingModes.h" 23 #include "llvm/ADT/STLExtras.h" 24 #include "llvm/CodeGen/Analysis.h" 25 #include "llvm/CodeGen/FastISel.h" 26 #include "llvm/CodeGen/FunctionLoweringInfo.h" 27 #include "llvm/CodeGen/MachineConstantPool.h" 28 #include "llvm/CodeGen/MachineFrameInfo.h" 29 #include "llvm/CodeGen/MachineInstrBuilder.h" 30 #include "llvm/CodeGen/MachineMemOperand.h" 31 #include "llvm/CodeGen/MachineModuleInfo.h" 32 #include "llvm/CodeGen/MachineRegisterInfo.h" 33 #include "llvm/IR/CallingConv.h" 34 #include "llvm/IR/DataLayout.h" 35 #include "llvm/IR/DerivedTypes.h" 36 #include "llvm/IR/GlobalVariable.h" 37 #include "llvm/IR/Instructions.h" 38 #include "llvm/IR/IntrinsicInst.h" 39 #include "llvm/IR/Module.h" 40 #include "llvm/IR/Operator.h" 41 #include "llvm/Support/CallSite.h" 42 #include "llvm/Support/CommandLine.h" 43 #include "llvm/Support/ErrorHandling.h" 44 #include "llvm/Support/GetElementPtrTypeIterator.h" 45 #include "llvm/Support/MathExtras.h" 46 #include "llvm/Target/TargetInstrInfo.h" 47 #include "llvm/Target/TargetLowering.h" 48 #include "llvm/Target/TargetMachine.h" 49 #include "llvm/Target/TargetOptions.h" 50 using namespace llvm; 51 52 extern cl::opt<bool> EnableARMLongCalls; 53 54 namespace { 55 56 // All possible address modes, plus some. 57 typedef struct Address { 58 enum { 59 RegBase, 60 FrameIndexBase 61 } BaseType; 62 63 union { 64 unsigned Reg; 65 int FI; 66 } Base; 67 68 int Offset; 69 70 // Innocuous defaults for our address. 71 Address() 72 : BaseType(RegBase), Offset(0) { 73 Base.Reg = 0; 74 } 75 } Address; 76 77 class ARMFastISel : public FastISel { 78 79 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can 80 /// make the right decision when generating code for different targets. 81 const ARMSubtarget *Subtarget; 82 const TargetMachine &TM; 83 const TargetInstrInfo &TII; 84 const TargetLowering &TLI; 85 ARMFunctionInfo *AFI; 86 87 // Convenience variables to avoid some queries. 88 bool isThumb2; 89 LLVMContext *Context; 90 91 public: 92 explicit ARMFastISel(FunctionLoweringInfo &funcInfo, 93 const TargetLibraryInfo *libInfo) 94 : FastISel(funcInfo, libInfo), 95 TM(funcInfo.MF->getTarget()), 96 TII(*TM.getInstrInfo()), 97 TLI(*TM.getTargetLowering()) { 98 Subtarget = &TM.getSubtarget<ARMSubtarget>(); 99 AFI = funcInfo.MF->getInfo<ARMFunctionInfo>(); 100 isThumb2 = AFI->isThumbFunction(); 101 Context = &funcInfo.Fn->getContext(); 102 } 103 104 // Code from FastISel.cpp. 
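  // A note on the FastEmitInst_* overloads declared below: the suffix names
  // the operand kinds ("r" = register, "i" = integer immediate, "f" = FP
  // immediate), mirroring the FastISel base class. Illustrative sketch only
  // (not a call made in this file): FastEmitInst_ri(ARM::ADDri,
  // &ARM::GPRRegClass, SrcReg, /*Op0IsKill*/false, 4) would emit an ADDri
  // computing SrcReg + 4 into a freshly created virtual register.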
105 private: 106 unsigned FastEmitInst_(unsigned MachineInstOpcode, 107 const TargetRegisterClass *RC); 108 unsigned FastEmitInst_r(unsigned MachineInstOpcode, 109 const TargetRegisterClass *RC, 110 unsigned Op0, bool Op0IsKill); 111 unsigned FastEmitInst_rr(unsigned MachineInstOpcode, 112 const TargetRegisterClass *RC, 113 unsigned Op0, bool Op0IsKill, 114 unsigned Op1, bool Op1IsKill); 115 unsigned FastEmitInst_rrr(unsigned MachineInstOpcode, 116 const TargetRegisterClass *RC, 117 unsigned Op0, bool Op0IsKill, 118 unsigned Op1, bool Op1IsKill, 119 unsigned Op2, bool Op2IsKill); 120 unsigned FastEmitInst_ri(unsigned MachineInstOpcode, 121 const TargetRegisterClass *RC, 122 unsigned Op0, bool Op0IsKill, 123 uint64_t Imm); 124 unsigned FastEmitInst_rf(unsigned MachineInstOpcode, 125 const TargetRegisterClass *RC, 126 unsigned Op0, bool Op0IsKill, 127 const ConstantFP *FPImm); 128 unsigned FastEmitInst_rri(unsigned MachineInstOpcode, 129 const TargetRegisterClass *RC, 130 unsigned Op0, bool Op0IsKill, 131 unsigned Op1, bool Op1IsKill, 132 uint64_t Imm); 133 unsigned FastEmitInst_i(unsigned MachineInstOpcode, 134 const TargetRegisterClass *RC, 135 uint64_t Imm); 136 unsigned FastEmitInst_ii(unsigned MachineInstOpcode, 137 const TargetRegisterClass *RC, 138 uint64_t Imm1, uint64_t Imm2); 139 140 unsigned FastEmitInst_extractsubreg(MVT RetVT, 141 unsigned Op0, bool Op0IsKill, 142 uint32_t Idx); 143 144 // Backend specific FastISel code. 145 private: 146 virtual bool TargetSelectInstruction(const Instruction *I); 147 virtual unsigned TargetMaterializeConstant(const Constant *C); 148 virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI); 149 virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, 150 const LoadInst *LI); 151 virtual bool FastLowerArguments(); 152 private: 153 #include "ARMGenFastISel.inc" 154 155 // Instruction selection routines. 156 private: 157 bool SelectLoad(const Instruction *I); 158 bool SelectStore(const Instruction *I); 159 bool SelectBranch(const Instruction *I); 160 bool SelectIndirectBr(const Instruction *I); 161 bool SelectCmp(const Instruction *I); 162 bool SelectFPExt(const Instruction *I); 163 bool SelectFPTrunc(const Instruction *I); 164 bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode); 165 bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode); 166 bool SelectIToFP(const Instruction *I, bool isSigned); 167 bool SelectFPToI(const Instruction *I, bool isSigned); 168 bool SelectDiv(const Instruction *I, bool isSigned); 169 bool SelectRem(const Instruction *I, bool isSigned); 170 bool SelectCall(const Instruction *I, const char *IntrMemName); 171 bool SelectIntrinsicCall(const IntrinsicInst &I); 172 bool SelectSelect(const Instruction *I); 173 bool SelectRet(const Instruction *I); 174 bool SelectTrunc(const Instruction *I); 175 bool SelectIntExt(const Instruction *I); 176 bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy); 177 178 // Utility routines. 
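  // Rough flow for a memory access, using the routines declared below:
  // SelectLoad/SelectStore compute an Address with ARMComputeAddress and hand
  // it to ARMEmitLoad/ARMEmitStore, which call ARMSimplifyAddress to fold the
  // offset into a form the chosen encoding accepts before emitting the access.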
179 private: 180 bool isTypeLegal(Type *Ty, MVT &VT); 181 bool isLoadTypeLegal(Type *Ty, MVT &VT); 182 bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, 183 bool isZExt); 184 bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, 185 unsigned Alignment = 0, bool isZExt = true, 186 bool allocReg = true); 187 bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr, 188 unsigned Alignment = 0); 189 bool ARMComputeAddress(const Value *Obj, Address &Addr); 190 void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3); 191 bool ARMIsMemCpySmall(uint64_t Len); 192 bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len, 193 unsigned Alignment); 194 unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt); 195 unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT); 196 unsigned ARMMaterializeInt(const Constant *C, MVT VT); 197 unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT); 198 unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg); 199 unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg); 200 unsigned ARMSelectCallOp(bool UseReg); 201 unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT); 202 203 // Call handling routines. 204 private: 205 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, 206 bool Return, 207 bool isVarArg); 208 bool ProcessCallArgs(SmallVectorImpl<Value*> &Args, 209 SmallVectorImpl<unsigned> &ArgRegs, 210 SmallVectorImpl<MVT> &ArgVTs, 211 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 212 SmallVectorImpl<unsigned> &RegArgs, 213 CallingConv::ID CC, 214 unsigned &NumBytes, 215 bool isVarArg); 216 unsigned getLibcallReg(const Twine &Name); 217 bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 218 const Instruction *I, CallingConv::ID CC, 219 unsigned &NumBytes, bool isVarArg); 220 bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call); 221 222 // OptionalDef handling routines. 223 private: 224 bool isARMNEONPred(const MachineInstr *MI); 225 bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR); 226 const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB); 227 void AddLoadStoreOperands(MVT VT, Address &Addr, 228 const MachineInstrBuilder &MIB, 229 unsigned Flags, bool useAM3); 230 }; 231 232 } // end anonymous namespace 233 234 #include "ARMGenCallingConv.inc" 235 236 // DefinesOptionalPredicate - This is different from DefinesPredicate in that 237 // we don't care about implicit defs here, just places we'll need to add a 238 // default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR. 239 bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) { 240 if (!MI->hasOptionalDef()) 241 return false; 242 243 // Look to see if our OptionalDef is defining CPSR or CCR. 244 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 245 const MachineOperand &MO = MI->getOperand(i); 246 if (!MO.isReg() || !MO.isDef()) continue; 247 if (MO.getReg() == ARM::CPSR) 248 *CPSR = true; 249 } 250 return true; 251 } 252 253 bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) { 254 const MCInstrDesc &MCID = MI->getDesc(); 255 256 // If we're a thumb2 or not NEON function we were handled via isPredicable. 
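  // (Said differently: in ARM mode, NEON instructions are not reported as
  // predicable, but they still carry a predicate operand that has to be
  // filled with a default value; AddOptionalDefs below relies on this check.)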
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  CPSR is set to true iff the optional
  // def is CPSR; all other optional defs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
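    // Same pattern as the other FastEmitInst_* variants: with an explicit def
    // we can name ResultReg directly; otherwise (the else branch below) the
    // result lands in an implicitly defined physical register and is copied
    // into ResultReg with a COPY.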
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 355 .addReg(Op0, Op0IsKill * RegState::Kill) 356 .addReg(Op1, Op1IsKill * RegState::Kill) 357 .addReg(Op2, Op2IsKill * RegState::Kill)); 358 } else { 359 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 360 .addReg(Op0, Op0IsKill * RegState::Kill) 361 .addReg(Op1, Op1IsKill * RegState::Kill) 362 .addReg(Op2, Op2IsKill * RegState::Kill)); 363 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 364 TII.get(TargetOpcode::COPY), ResultReg) 365 .addReg(II.ImplicitDefs[0])); 366 } 367 return ResultReg; 368 } 369 370 unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode, 371 const TargetRegisterClass *RC, 372 unsigned Op0, bool Op0IsKill, 373 uint64_t Imm) { 374 unsigned ResultReg = createResultReg(RC); 375 const MCInstrDesc &II = TII.get(MachineInstOpcode); 376 377 if (II.getNumDefs() >= 1) { 378 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 379 .addReg(Op0, Op0IsKill * RegState::Kill) 380 .addImm(Imm)); 381 } else { 382 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 383 .addReg(Op0, Op0IsKill * RegState::Kill) 384 .addImm(Imm)); 385 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 386 TII.get(TargetOpcode::COPY), ResultReg) 387 .addReg(II.ImplicitDefs[0])); 388 } 389 return ResultReg; 390 } 391 392 unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode, 393 const TargetRegisterClass *RC, 394 unsigned Op0, bool Op0IsKill, 395 const ConstantFP *FPImm) { 396 unsigned ResultReg = createResultReg(RC); 397 const MCInstrDesc &II = TII.get(MachineInstOpcode); 398 399 if (II.getNumDefs() >= 1) { 400 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 401 .addReg(Op0, Op0IsKill * RegState::Kill) 402 .addFPImm(FPImm)); 403 } else { 404 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 405 .addReg(Op0, Op0IsKill * RegState::Kill) 406 .addFPImm(FPImm)); 407 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 408 TII.get(TargetOpcode::COPY), ResultReg) 409 .addReg(II.ImplicitDefs[0])); 410 } 411 return ResultReg; 412 } 413 414 unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode, 415 const TargetRegisterClass *RC, 416 unsigned Op0, bool Op0IsKill, 417 unsigned Op1, bool Op1IsKill, 418 uint64_t Imm) { 419 unsigned ResultReg = createResultReg(RC); 420 const MCInstrDesc &II = TII.get(MachineInstOpcode); 421 422 if (II.getNumDefs() >= 1) { 423 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 424 .addReg(Op0, Op0IsKill * RegState::Kill) 425 .addReg(Op1, Op1IsKill * RegState::Kill) 426 .addImm(Imm)); 427 } else { 428 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 429 .addReg(Op0, Op0IsKill * RegState::Kill) 430 .addReg(Op1, Op1IsKill * RegState::Kill) 431 .addImm(Imm)); 432 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 433 TII.get(TargetOpcode::COPY), ResultReg) 434 .addReg(II.ImplicitDefs[0])); 435 } 436 return ResultReg; 437 } 438 439 unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode, 440 const TargetRegisterClass *RC, 441 uint64_t Imm) { 442 unsigned ResultReg = createResultReg(RC); 443 const MCInstrDesc &II = TII.get(MachineInstOpcode); 444 445 if (II.getNumDefs() >= 1) { 446 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 447 .addImm(Imm)); 448 } else { 449 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 450 .addImm(Imm)); 451 
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 452 TII.get(TargetOpcode::COPY), ResultReg) 453 .addReg(II.ImplicitDefs[0])); 454 } 455 return ResultReg; 456 } 457 458 unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode, 459 const TargetRegisterClass *RC, 460 uint64_t Imm1, uint64_t Imm2) { 461 unsigned ResultReg = createResultReg(RC); 462 const MCInstrDesc &II = TII.get(MachineInstOpcode); 463 464 if (II.getNumDefs() >= 1) { 465 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 466 .addImm(Imm1).addImm(Imm2)); 467 } else { 468 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 469 .addImm(Imm1).addImm(Imm2)); 470 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 471 TII.get(TargetOpcode::COPY), 472 ResultReg) 473 .addReg(II.ImplicitDefs[0])); 474 } 475 return ResultReg; 476 } 477 478 unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT, 479 unsigned Op0, bool Op0IsKill, 480 uint32_t Idx) { 481 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); 482 assert(TargetRegisterInfo::isVirtualRegister(Op0) && 483 "Cannot yet extract from physregs"); 484 485 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 486 DL, TII.get(TargetOpcode::COPY), ResultReg) 487 .addReg(Op0, getKillRegState(Op0IsKill), Idx)); 488 return ResultReg; 489 } 490 491 // TODO: Don't worry about 64-bit now, but when this is fixed remove the 492 // checks from the various callers. 493 unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) { 494 if (VT == MVT::f64) return 0; 495 496 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 497 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 498 TII.get(ARM::VMOVSR), MoveReg) 499 .addReg(SrcReg)); 500 return MoveReg; 501 } 502 503 unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) { 504 if (VT == MVT::i64) return 0; 505 506 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 507 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 508 TII.get(ARM::VMOVRS), MoveReg) 509 .addReg(SrcReg)); 510 return MoveReg; 511 } 512 513 // For double width floating point we need to materialize two constants 514 // (the high and the low) into integer registers then use a move to get 515 // the combined constant into an FP reg. 516 unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) { 517 const APFloat Val = CFP->getValueAPF(); 518 bool is64bit = VT == MVT::f64; 519 520 // This checks to see if we can use VFP3 instructions to materialize 521 // a constant, otherwise we have to go through the constant pool. 522 if (TLI.isFPImmLegal(Val, VT)) { 523 int Imm; 524 unsigned Opc; 525 if (is64bit) { 526 Imm = ARM_AM::getFP64Imm(Val); 527 Opc = ARM::FCONSTD; 528 } else { 529 Imm = ARM_AM::getFP32Imm(Val); 530 Opc = ARM::FCONSTS; 531 } 532 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 533 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 534 DestReg) 535 .addImm(Imm)); 536 return DestReg; 537 } 538 539 // Require VFP2 for loading fp constants. 540 if (!Subtarget->hasVFP2()) return false; 541 542 // MachineConstantPool wants an explicit alignment. 543 unsigned Align = TD.getPrefTypeAlignment(CFP->getType()); 544 if (Align == 0) { 545 // TODO: Figure out if this is correct. 546 Align = TD.getTypeAllocSize(CFP->getType()); 547 } 548 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align); 549 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 550 unsigned Opc = is64bit ? 
ARM::VLDRD : ARM::VLDRS; 551 552 // The extra reg is for addrmode5. 553 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 554 DestReg) 555 .addConstantPoolIndex(Idx) 556 .addReg(0)); 557 return DestReg; 558 } 559 560 unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) { 561 562 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1) 563 return false; 564 565 // If we can do this in a single instruction without a constant pool entry 566 // do so now. 567 const ConstantInt *CI = cast<ConstantInt>(C); 568 if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) { 569 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16; 570 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass : 571 &ARM::GPRRegClass; 572 unsigned ImmReg = createResultReg(RC); 573 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 574 TII.get(Opc), ImmReg) 575 .addImm(CI->getZExtValue())); 576 return ImmReg; 577 } 578 579 // Use MVN to emit negative constants. 580 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) { 581 unsigned Imm = (unsigned)~(CI->getSExtValue()); 582 bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 583 (ARM_AM::getSOImmVal(Imm) != -1); 584 if (UseImm) { 585 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi; 586 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 587 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 588 TII.get(Opc), ImmReg) 589 .addImm(Imm)); 590 return ImmReg; 591 } 592 } 593 594 // Load from constant pool. For now 32-bit only. 595 if (VT != MVT::i32) 596 return false; 597 598 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 599 600 // MachineConstantPool wants an explicit alignment. 601 unsigned Align = TD.getPrefTypeAlignment(C->getType()); 602 if (Align == 0) { 603 // TODO: Figure out if this is correct. 604 Align = TD.getTypeAllocSize(C->getType()); 605 } 606 unsigned Idx = MCP.getConstantPoolIndex(C, Align); 607 608 if (isThumb2) 609 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 610 TII.get(ARM::t2LDRpci), DestReg) 611 .addConstantPoolIndex(Idx)); 612 else 613 // The extra immediate is for addrmode2. 614 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 615 TII.get(ARM::LDRcp), DestReg) 616 .addConstantPoolIndex(Idx) 617 .addImm(0)); 618 619 return DestReg; 620 } 621 622 unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) { 623 // For now 32-bit only. 624 if (VT != MVT::i32) return 0; 625 626 Reloc::Model RelocM = TM.getRelocationModel(); 627 bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM); 628 const TargetRegisterClass *RC = isThumb2 ? 629 (const TargetRegisterClass*)&ARM::rGPRRegClass : 630 (const TargetRegisterClass*)&ARM::GPRRegClass; 631 unsigned DestReg = createResultReg(RC); 632 633 // FastISel TLS support on non-Darwin is broken, punt to SelectionDAG. 634 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV); 635 bool IsThreadLocal = GVar && GVar->isThreadLocal(); 636 if (!Subtarget->isTargetDarwin() && IsThreadLocal) return 0; 637 638 // Use movw+movt when possible, it avoids constant pool entries. 639 // Darwin targets don't support movt with Reloc::Static, see 640 // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support 641 // static movt relocations. 642 if (Subtarget->useMovt() && 643 Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) { 644 unsigned Opc; 645 switch (RelocM) { 646 case Reloc::PIC_: 647 Opc = isThumb2 ? 
ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel; 648 break; 649 case Reloc::DynamicNoPIC: 650 Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn; 651 break; 652 default: 653 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm; 654 break; 655 } 656 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 657 DestReg).addGlobalAddress(GV)); 658 } else { 659 // MachineConstantPool wants an explicit alignment. 660 unsigned Align = TD.getPrefTypeAlignment(GV->getType()); 661 if (Align == 0) { 662 // TODO: Figure out if this is correct. 663 Align = TD.getTypeAllocSize(GV->getType()); 664 } 665 666 if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_) 667 return ARMLowerPICELF(GV, Align, VT); 668 669 // Grab index. 670 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : 671 (Subtarget->isThumb() ? 4 : 8); 672 unsigned Id = AFI->createPICLabelUId(); 673 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id, 674 ARMCP::CPValue, 675 PCAdj); 676 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); 677 678 // Load value. 679 MachineInstrBuilder MIB; 680 if (isThumb2) { 681 unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic; 682 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg) 683 .addConstantPoolIndex(Idx); 684 if (RelocM == Reloc::PIC_) 685 MIB.addImm(Id); 686 AddOptionalDefs(MIB); 687 } else { 688 // The extra immediate is for addrmode2. 689 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp), 690 DestReg) 691 .addConstantPoolIndex(Idx) 692 .addImm(0); 693 AddOptionalDefs(MIB); 694 695 if (RelocM == Reloc::PIC_) { 696 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD; 697 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 698 699 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 700 DL, TII.get(Opc), NewDestReg) 701 .addReg(DestReg) 702 .addImm(Id); 703 AddOptionalDefs(MIB); 704 return NewDestReg; 705 } 706 } 707 } 708 709 if (IsIndirect) { 710 MachineInstrBuilder MIB; 711 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 712 if (isThumb2) 713 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 714 TII.get(ARM::t2LDRi12), NewDestReg) 715 .addReg(DestReg) 716 .addImm(0); 717 else 718 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12), 719 NewDestReg) 720 .addReg(DestReg) 721 .addImm(0); 722 DestReg = NewDestReg; 723 AddOptionalDefs(MIB); 724 } 725 726 return DestReg; 727 } 728 729 unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) { 730 EVT CEVT = TLI.getValueType(C->getType(), true); 731 732 // Only handle simple types. 733 if (!CEVT.isSimple()) return 0; 734 MVT VT = CEVT.getSimpleVT(); 735 736 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) 737 return ARMMaterializeFP(CFP, VT); 738 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) 739 return ARMMaterializeGV(GV, VT); 740 else if (isa<ConstantInt>(C)) 741 return ARMMaterializeInt(C, VT); 742 743 return 0; 744 } 745 746 // TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF); 747 748 unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { 749 // Don't handle dynamic allocas. 750 if (!FuncInfo.StaticAllocaMap.count(AI)) return 0; 751 752 MVT VT; 753 if (!isLoadTypeLegal(AI->getType(), VT)) return 0; 754 755 DenseMap<const AllocaInst*, int>::iterator SI = 756 FuncInfo.StaticAllocaMap.find(AI); 757 758 // This will get lowered later into the correct offsets and registers 759 // via rewriteXFrameIndex. 
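  // Illustrative sketch (register numbers are made up): for a static alloca
  // at frame index 2 this emits roughly
  //   %vreg0 = ADDri <fi#2>, 0      ; t2ADDri in Thumb2 mode
  // and the frame index is turned into a concrete base register + offset when
  // frame indices are eliminated.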
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
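              // e.g. an index of 3 over i32 elements contributes
              // 3 * getTypeAllocSize(i32) = 12 bytes to TmpOffset here.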
856 TmpOffset += CI->getSExtValue() * S; 857 break; 858 } 859 if (isa<AddOperator>(Op) && 860 (!isa<Instruction>(Op) || 861 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()] 862 == FuncInfo.MBB) && 863 isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) { 864 // An add (in the same block) with a constant operand. Fold the 865 // constant. 866 ConstantInt *CI = 867 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1)); 868 TmpOffset += CI->getSExtValue() * S; 869 // Iterate on the other operand. 870 Op = cast<AddOperator>(Op)->getOperand(0); 871 continue; 872 } 873 // Unsupported 874 goto unsupported_gep; 875 } 876 } 877 } 878 879 // Try to grab the base operand now. 880 Addr.Offset = TmpOffset; 881 if (ARMComputeAddress(U->getOperand(0), Addr)) return true; 882 883 // We failed, restore everything and try the other options. 884 Addr = SavedAddr; 885 886 unsupported_gep: 887 break; 888 } 889 case Instruction::Alloca: { 890 const AllocaInst *AI = cast<AllocaInst>(Obj); 891 DenseMap<const AllocaInst*, int>::iterator SI = 892 FuncInfo.StaticAllocaMap.find(AI); 893 if (SI != FuncInfo.StaticAllocaMap.end()) { 894 Addr.BaseType = Address::FrameIndexBase; 895 Addr.Base.FI = SI->second; 896 return true; 897 } 898 break; 899 } 900 } 901 902 // Try to get this in a register if nothing else has worked. 903 if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj); 904 return Addr.Base.Reg != 0; 905 } 906 907 void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) { 908 bool needsLowering = false; 909 switch (VT.SimpleTy) { 910 default: llvm_unreachable("Unhandled load/store type!"); 911 case MVT::i1: 912 case MVT::i8: 913 case MVT::i16: 914 case MVT::i32: 915 if (!useAM3) { 916 // Integer loads/stores handle 12-bit offsets. 917 needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset); 918 // Handle negative offsets. 919 if (needsLowering && isThumb2) 920 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 && 921 Addr.Offset > -256); 922 } else { 923 // ARM halfword load/stores and signed byte loads use +/-imm8 offsets. 924 needsLowering = (Addr.Offset > 255 || Addr.Offset < -255); 925 } 926 break; 927 case MVT::f32: 928 case MVT::f64: 929 // Floating point operands handle 8-bit offsets. 930 needsLowering = ((Addr.Offset & 0xff) != Addr.Offset); 931 break; 932 } 933 934 // If this is a stack pointer and the offset needs to be simplified then 935 // put the alloca address into a register, set the base type back to 936 // register and continue. This should almost never happen. 937 if (needsLowering && Addr.BaseType == Address::FrameIndexBase) { 938 const TargetRegisterClass *RC = isThumb2 ? 939 (const TargetRegisterClass*)&ARM::tGPRRegClass : 940 (const TargetRegisterClass*)&ARM::GPRRegClass; 941 unsigned ResultReg = createResultReg(RC); 942 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; 943 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 944 TII.get(Opc), ResultReg) 945 .addFrameIndex(Addr.Base.FI) 946 .addImm(0)); 947 Addr.Base.Reg = ResultReg; 948 Addr.BaseType = Address::RegBase; 949 } 950 951 // Since the offset is too large for the load/store instruction 952 // get the reg+offset into a register. 
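  // Sketch: an i32 access at [base, #+4096] fails the 12-bit immediate check
  // above, so the offset is folded with an ADD (the FastEmit_ri_ call below)
  // and the access then uses [newbase, #0].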
953 if (needsLowering) { 954 Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg, 955 /*Op0IsKill*/false, Addr.Offset, MVT::i32); 956 Addr.Offset = 0; 957 } 958 } 959 960 void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr, 961 const MachineInstrBuilder &MIB, 962 unsigned Flags, bool useAM3) { 963 // addrmode5 output depends on the selection dag addressing dividing the 964 // offset by 4 that it then later multiplies. Do this here as well. 965 if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64) 966 Addr.Offset /= 4; 967 968 // Frame base works a bit differently. Handle it separately. 969 if (Addr.BaseType == Address::FrameIndexBase) { 970 int FI = Addr.Base.FI; 971 int Offset = Addr.Offset; 972 MachineMemOperand *MMO = 973 FuncInfo.MF->getMachineMemOperand( 974 MachinePointerInfo::getFixedStack(FI, Offset), 975 Flags, 976 MFI.getObjectSize(FI), 977 MFI.getObjectAlignment(FI)); 978 // Now add the rest of the operands. 979 MIB.addFrameIndex(FI); 980 981 // ARM halfword load/stores and signed byte loads need an additional 982 // operand. 983 if (useAM3) { 984 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 985 MIB.addReg(0); 986 MIB.addImm(Imm); 987 } else { 988 MIB.addImm(Addr.Offset); 989 } 990 MIB.addMemOperand(MMO); 991 } else { 992 // Now add the rest of the operands. 993 MIB.addReg(Addr.Base.Reg); 994 995 // ARM halfword load/stores and signed byte loads need an additional 996 // operand. 997 if (useAM3) { 998 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 999 MIB.addReg(0); 1000 MIB.addImm(Imm); 1001 } else { 1002 MIB.addImm(Addr.Offset); 1003 } 1004 } 1005 AddOptionalDefs(MIB); 1006 } 1007 1008 bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, 1009 unsigned Alignment, bool isZExt, bool allocReg) { 1010 unsigned Opc; 1011 bool useAM3 = false; 1012 bool needVMOV = false; 1013 const TargetRegisterClass *RC; 1014 switch (VT.SimpleTy) { 1015 // This is mostly going to be Neon/vector support. 1016 default: return false; 1017 case MVT::i1: 1018 case MVT::i8: 1019 if (isThumb2) { 1020 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1021 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8; 1022 else 1023 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12; 1024 } else { 1025 if (isZExt) { 1026 Opc = ARM::LDRBi12; 1027 } else { 1028 Opc = ARM::LDRSB; 1029 useAM3 = true; 1030 } 1031 } 1032 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; 1033 break; 1034 case MVT::i16: 1035 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) 1036 return false; 1037 1038 if (isThumb2) { 1039 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1040 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8; 1041 else 1042 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12; 1043 } else { 1044 Opc = isZExt ? ARM::LDRH : ARM::LDRSH; 1045 useAM3 = true; 1046 } 1047 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; 1048 break; 1049 case MVT::i32: 1050 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem()) 1051 return false; 1052 1053 if (isThumb2) { 1054 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1055 Opc = ARM::t2LDRi8; 1056 else 1057 Opc = ARM::t2LDRi12; 1058 } else { 1059 Opc = ARM::LDRi12; 1060 } 1061 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; 1062 break; 1063 case MVT::f32: 1064 if (!Subtarget->hasVFP2()) return false; 1065 // Unaligned loads need special handling. Floats require word-alignment. 
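      // Sketch of the under-aligned f32 path taken below: the value is loaded
      // with an integer LDR into a GPR and then transferred to an SPR with
      // VMOVSR (see the needVMOV block at the end of this function).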
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert (ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load. Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ?
        (const TargetRegisterClass*)&ARM::tGPRRegClass :
        (const TargetRegisterClass*)&ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
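    // (For i1 the value was just masked down to bit 0 with an AND #1 above;
    // it then falls through and is stored with the i8 store opcodes below.)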
1148 case MVT::i8: 1149 if (isThumb2) { 1150 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1151 StrOpc = ARM::t2STRBi8; 1152 else 1153 StrOpc = ARM::t2STRBi12; 1154 } else { 1155 StrOpc = ARM::STRBi12; 1156 } 1157 break; 1158 case MVT::i16: 1159 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) 1160 return false; 1161 1162 if (isThumb2) { 1163 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1164 StrOpc = ARM::t2STRHi8; 1165 else 1166 StrOpc = ARM::t2STRHi12; 1167 } else { 1168 StrOpc = ARM::STRH; 1169 useAM3 = true; 1170 } 1171 break; 1172 case MVT::i32: 1173 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem()) 1174 return false; 1175 1176 if (isThumb2) { 1177 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1178 StrOpc = ARM::t2STRi8; 1179 else 1180 StrOpc = ARM::t2STRi12; 1181 } else { 1182 StrOpc = ARM::STRi12; 1183 } 1184 break; 1185 case MVT::f32: 1186 if (!Subtarget->hasVFP2()) return false; 1187 // Unaligned stores need special handling. Floats require word-alignment. 1188 if (Alignment && Alignment < 4) { 1189 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 1190 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1191 TII.get(ARM::VMOVRS), MoveReg) 1192 .addReg(SrcReg)); 1193 SrcReg = MoveReg; 1194 VT = MVT::i32; 1195 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12; 1196 } else { 1197 StrOpc = ARM::VSTRS; 1198 } 1199 break; 1200 case MVT::f64: 1201 if (!Subtarget->hasVFP2()) return false; 1202 // FIXME: Unaligned stores need special handling. Doublewords require 1203 // word-alignment. 1204 if (Alignment && Alignment < 4) 1205 return false; 1206 1207 StrOpc = ARM::VSTRD; 1208 break; 1209 } 1210 // Simplify this down to something we can handle. 1211 ARMSimplifyAddress(Addr, VT, useAM3); 1212 1213 // Create the base instruction, then add the operands. 1214 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1215 TII.get(StrOpc)) 1216 .addReg(SrcReg); 1217 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3); 1218 return true; 1219 } 1220 1221 bool ARMFastISel::SelectStore(const Instruction *I) { 1222 Value *Op0 = I->getOperand(0); 1223 unsigned SrcReg = 0; 1224 1225 // Atomic stores need special handling. 1226 if (cast<StoreInst>(I)->isAtomic()) 1227 return false; 1228 1229 // Verify we have a legal type before going any further. 1230 MVT VT; 1231 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT)) 1232 return false; 1233 1234 // Get the value to be stored into a register. 1235 SrcReg = getRegForValue(Op0); 1236 if (SrcReg == 0) return false; 1237 1238 // See if we can handle this address. 1239 Address Addr; 1240 if (!ARMComputeAddress(I->getOperand(1), Addr)) 1241 return false; 1242 1243 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment())) 1244 return false; 1245 return true; 1246 } 1247 1248 static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) { 1249 switch (Pred) { 1250 // Needs two compares... 1251 case CmpInst::FCMP_ONE: 1252 case CmpInst::FCMP_UEQ: 1253 default: 1254 // AL is our "false" for now. The other two need more compares. 
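    // (e.g. FCMP_ONE decomposes into an OGT test plus an OLT test, and
    // FCMP_UEQ into OEQ plus UNO, so both are rejected here for now.)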
1255 return ARMCC::AL; 1256 case CmpInst::ICMP_EQ: 1257 case CmpInst::FCMP_OEQ: 1258 return ARMCC::EQ; 1259 case CmpInst::ICMP_SGT: 1260 case CmpInst::FCMP_OGT: 1261 return ARMCC::GT; 1262 case CmpInst::ICMP_SGE: 1263 case CmpInst::FCMP_OGE: 1264 return ARMCC::GE; 1265 case CmpInst::ICMP_UGT: 1266 case CmpInst::FCMP_UGT: 1267 return ARMCC::HI; 1268 case CmpInst::FCMP_OLT: 1269 return ARMCC::MI; 1270 case CmpInst::ICMP_ULE: 1271 case CmpInst::FCMP_OLE: 1272 return ARMCC::LS; 1273 case CmpInst::FCMP_ORD: 1274 return ARMCC::VC; 1275 case CmpInst::FCMP_UNO: 1276 return ARMCC::VS; 1277 case CmpInst::FCMP_UGE: 1278 return ARMCC::PL; 1279 case CmpInst::ICMP_SLT: 1280 case CmpInst::FCMP_ULT: 1281 return ARMCC::LT; 1282 case CmpInst::ICMP_SLE: 1283 case CmpInst::FCMP_ULE: 1284 return ARMCC::LE; 1285 case CmpInst::FCMP_UNE: 1286 case CmpInst::ICMP_NE: 1287 return ARMCC::NE; 1288 case CmpInst::ICMP_UGE: 1289 return ARMCC::HS; 1290 case CmpInst::ICMP_ULT: 1291 return ARMCC::LO; 1292 } 1293 } 1294 1295 bool ARMFastISel::SelectBranch(const Instruction *I) { 1296 const BranchInst *BI = cast<BranchInst>(I); 1297 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; 1298 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; 1299 1300 // Simple branch support. 1301 1302 // If we can, avoid recomputing the compare - redoing it could lead to wonky 1303 // behavior. 1304 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) { 1305 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) { 1306 1307 // Get the compare predicate. 1308 // Try to take advantage of fallthrough opportunities. 1309 CmpInst::Predicate Predicate = CI->getPredicate(); 1310 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1311 std::swap(TBB, FBB); 1312 Predicate = CmpInst::getInversePredicate(Predicate); 1313 } 1314 1315 ARMCC::CondCodes ARMPred = getComparePred(Predicate); 1316 1317 // We may not handle every CC for now. 1318 if (ARMPred == ARMCC::AL) return false; 1319 1320 // Emit the compare. 1321 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1322 return false; 1323 1324 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1325 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1326 .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR); 1327 FastEmitBranch(FBB, DL); 1328 FuncInfo.MBB->addSuccessor(TBB); 1329 return true; 1330 } 1331 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) { 1332 MVT SourceVT; 1333 if (TI->hasOneUse() && TI->getParent() == I->getParent() && 1334 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) { 1335 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri; 1336 unsigned OpReg = getRegForValue(TI->getOperand(0)); 1337 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1338 TII.get(TstOpc)) 1339 .addReg(OpReg).addImm(1)); 1340 1341 unsigned CCMode = ARMCC::NE; 1342 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1343 std::swap(TBB, FBB); 1344 CCMode = ARMCC::EQ; 1345 } 1346 1347 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1348 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1349 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); 1350 1351 FastEmitBranch(FBB, DL); 1352 FuncInfo.MBB->addSuccessor(TBB); 1353 return true; 1354 } 1355 } else if (const ConstantInt *CI = 1356 dyn_cast<ConstantInt>(BI->getCondition())) { 1357 uint64_t Imm = CI->getZExtValue(); 1358 MachineBasicBlock *Target = (Imm == 0) ? 
        FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
                  .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);

  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(Ty, true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ?
(ARM_AM::getT2SOImmVal(Imm) != -1) : 1437 (ARM_AM::getSOImmVal(Imm) != -1); 1438 } 1439 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) { 1440 if (SrcVT == MVT::f32 || SrcVT == MVT::f64) 1441 if (ConstFP->isZero() && !ConstFP->isNegative()) 1442 UseImm = true; 1443 } 1444 1445 unsigned CmpOpc; 1446 bool isICmp = true; 1447 bool needsExt = false; 1448 switch (SrcVT.SimpleTy) { 1449 default: return false; 1450 // TODO: Verify compares. 1451 case MVT::f32: 1452 isICmp = false; 1453 CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES; 1454 break; 1455 case MVT::f64: 1456 isICmp = false; 1457 CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED; 1458 break; 1459 case MVT::i1: 1460 case MVT::i8: 1461 case MVT::i16: 1462 needsExt = true; 1463 // Intentional fall-through. 1464 case MVT::i32: 1465 if (isThumb2) { 1466 if (!UseImm) 1467 CmpOpc = ARM::t2CMPrr; 1468 else 1469 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri; 1470 } else { 1471 if (!UseImm) 1472 CmpOpc = ARM::CMPrr; 1473 else 1474 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri; 1475 } 1476 break; 1477 } 1478 1479 unsigned SrcReg1 = getRegForValue(Src1Value); 1480 if (SrcReg1 == 0) return false; 1481 1482 unsigned SrcReg2 = 0; 1483 if (!UseImm) { 1484 SrcReg2 = getRegForValue(Src2Value); 1485 if (SrcReg2 == 0) return false; 1486 } 1487 1488 // We have i1, i8, or i16, we need to either zero extend or sign extend. 1489 if (needsExt) { 1490 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt); 1491 if (SrcReg1 == 0) return false; 1492 if (!UseImm) { 1493 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt); 1494 if (SrcReg2 == 0) return false; 1495 } 1496 } 1497 1498 if (!UseImm) { 1499 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1500 TII.get(CmpOpc)) 1501 .addReg(SrcReg1).addReg(SrcReg2)); 1502 } else { 1503 MachineInstrBuilder MIB; 1504 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) 1505 .addReg(SrcReg1); 1506 1507 // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0. 1508 if (isICmp) 1509 MIB.addImm(Imm); 1510 AddOptionalDefs(MIB); 1511 } 1512 1513 // For floating point we need to move the result to a comparison register 1514 // that we can then use for branches. 1515 if (Ty->isFloatTy() || Ty->isDoubleTy()) 1516 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1517 TII.get(ARM::FMSTAT))); 1518 return true; 1519 } 1520 1521 bool ARMFastISel::SelectCmp(const Instruction *I) { 1522 const CmpInst *CI = cast<CmpInst>(I); 1523 1524 // Get the compare predicate. 1525 ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate()); 1526 1527 // We may not handle every CC for now. 1528 if (ARMPred == ARMCC::AL) return false; 1529 1530 // Emit the compare. 1531 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1532 return false; 1533 1534 // Now set a register based on the comparison. Explicitly set the predicates 1535 // here. 1536 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1537 const TargetRegisterClass *RC = isThumb2 ? 1538 (const TargetRegisterClass*)&ARM::rGPRRegClass : 1539 (const TargetRegisterClass*)&ARM::GPRRegClass; 1540 unsigned DestReg = createResultReg(RC); 1541 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); 1542 unsigned ZeroReg = TargetMaterializeConstant(Zero); 1543 // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR. 
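  // Sketch of what follows: DestReg starts from a materialized zero and the
  // predicated move writes 1 over it only when ARMPred holds, leaving the
  // boolean result of the compare in a 32-bit register.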
1544 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg) 1545 .addReg(ZeroReg).addImm(1) 1546 .addImm(ARMPred).addReg(ARM::CPSR); 1547 1548 UpdateValueMap(I, DestReg); 1549 return true; 1550 } 1551 1552 bool ARMFastISel::SelectFPExt(const Instruction *I) { 1553 // Make sure we have VFP and that we're extending float to double. 1554 if (!Subtarget->hasVFP2()) return false; 1555 1556 Value *V = I->getOperand(0); 1557 if (!I->getType()->isDoubleTy() || 1558 !V->getType()->isFloatTy()) return false; 1559 1560 unsigned Op = getRegForValue(V); 1561 if (Op == 0) return false; 1562 1563 unsigned Result = createResultReg(&ARM::DPRRegClass); 1564 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1565 TII.get(ARM::VCVTDS), Result) 1566 .addReg(Op)); 1567 UpdateValueMap(I, Result); 1568 return true; 1569 } 1570 1571 bool ARMFastISel::SelectFPTrunc(const Instruction *I) { 1572 // Make sure we have VFP and that we're truncating double to float. 1573 if (!Subtarget->hasVFP2()) return false; 1574 1575 Value *V = I->getOperand(0); 1576 if (!(I->getType()->isFloatTy() && 1577 V->getType()->isDoubleTy())) return false; 1578 1579 unsigned Op = getRegForValue(V); 1580 if (Op == 0) return false; 1581 1582 unsigned Result = createResultReg(&ARM::SPRRegClass); 1583 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1584 TII.get(ARM::VCVTSD), Result) 1585 .addReg(Op)); 1586 UpdateValueMap(I, Result); 1587 return true; 1588 } 1589 1590 bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { 1591 // Make sure we have VFP. 1592 if (!Subtarget->hasVFP2()) return false; 1593 1594 MVT DstVT; 1595 Type *Ty = I->getType(); 1596 if (!isTypeLegal(Ty, DstVT)) 1597 return false; 1598 1599 Value *Src = I->getOperand(0); 1600 EVT SrcEVT = TLI.getValueType(Src->getType(), true); 1601 if (!SrcEVT.isSimple()) 1602 return false; 1603 MVT SrcVT = SrcEVT.getSimpleVT(); 1604 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 1605 return false; 1606 1607 unsigned SrcReg = getRegForValue(Src); 1608 if (SrcReg == 0) return false; 1609 1610 // Handle sign-extension. 1611 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) { 1612 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32, 1613 /*isZExt*/!isSigned); 1614 if (SrcReg == 0) return false; 1615 } 1616 1617 // The conversion routine works on fp-reg to fp-reg and the operand above 1618 // was an integer, move it to the fp registers if possible. 1619 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg); 1620 if (FP == 0) return false; 1621 1622 unsigned Opc; 1623 if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS; 1624 else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD; 1625 else return false; 1626 1627 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); 1628 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1629 ResultReg) 1630 .addReg(FP)); 1631 UpdateValueMap(I, ResultReg); 1632 return true; 1633 } 1634 1635 bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { 1636 // Make sure we have VFP. 1637 if (!Subtarget->hasVFP2()) return false; 1638 1639 MVT DstVT; 1640 Type *RetTy = I->getType(); 1641 if (!isTypeLegal(RetTy, DstVT)) 1642 return false; 1643 1644 unsigned Op = getRegForValue(I->getOperand(0)); 1645 if (Op == 0) return false; 1646 1647 unsigned Opc; 1648 Type *OpTy = I->getOperand(0)->getType(); 1649 if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS; 1650 else if (OpTy->isDoubleTy()) Opc = isSigned ? 
ARM::VTOSIZD : ARM::VTOUIZD; 1651 else return false; 1652 1653 // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg. 1654 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1655 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1656 ResultReg) 1657 .addReg(Op)); 1658 1659 // This result needs to be in an integer register, but the conversion only 1660 // takes place in fp-regs. 1661 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg); 1662 if (IntReg == 0) return false; 1663 1664 UpdateValueMap(I, IntReg); 1665 return true; 1666 } 1667 1668 bool ARMFastISel::SelectSelect(const Instruction *I) { 1669 MVT VT; 1670 if (!isTypeLegal(I->getType(), VT)) 1671 return false; 1672 1673 // Things need to be register sized for register moves. 1674 if (VT != MVT::i32) return false; 1675 1676 unsigned CondReg = getRegForValue(I->getOperand(0)); 1677 if (CondReg == 0) return false; 1678 unsigned Op1Reg = getRegForValue(I->getOperand(1)); 1679 if (Op1Reg == 0) return false; 1680 1681 // Check to see if we can use an immediate in the conditional move. 1682 int Imm = 0; 1683 bool UseImm = false; 1684 bool isNegativeImm = false; 1685 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) { 1686 assert (VT == MVT::i32 && "Expecting an i32."); 1687 Imm = (int)ConstInt->getValue().getZExtValue(); 1688 if (Imm < 0) { 1689 isNegativeImm = true; 1690 Imm = ~Imm; 1691 } 1692 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1693 (ARM_AM::getSOImmVal(Imm) != -1); 1694 } 1695 1696 unsigned Op2Reg = 0; 1697 if (!UseImm) { 1698 Op2Reg = getRegForValue(I->getOperand(2)); 1699 if (Op2Reg == 0) return false; 1700 } 1701 1702 unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri; 1703 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) 1704 .addReg(CondReg).addImm(0)); 1705 1706 unsigned MovCCOpc; 1707 const TargetRegisterClass *RC; 1708 if (!UseImm) { 1709 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass; 1710 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr; 1711 } else { 1712 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass; 1713 if (!isNegativeImm) 1714 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1715 else 1716 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi; 1717 } 1718 unsigned ResultReg = createResultReg(RC); 1719 if (!UseImm) 1720 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1721 .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR); 1722 else 1723 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1724 .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR); 1725 UpdateValueMap(I, ResultReg); 1726 return true; 1727 } 1728 1729 bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) { 1730 MVT VT; 1731 Type *Ty = I->getType(); 1732 if (!isTypeLegal(Ty, VT)) 1733 return false; 1734 1735 // If we have integer div support we should have selected this automagically. 1736 // In case we have a real miss go ahead and return false and we'll pick 1737 // it up later. 1738 if (Subtarget->hasDivide()) return false; 1739 1740 // Otherwise emit a libcall. 1741 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1742 if (VT == MVT::i8) 1743 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8; 1744 else if (VT == MVT::i16) 1745 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16; 1746 else if (VT == MVT::i32) 1747 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32; 1748 else if (VT == MVT::i64) 1749 LC = isSigned ? 
RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1750   else if (VT == MVT::i128)
1751     LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1752   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
1753
1754   return ARMEmitLibcall(I, LC);
1755 }
1756
1757 bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
1758   MVT VT;
1759   Type *Ty = I->getType();
1760   if (!isTypeLegal(Ty, VT))
1761     return false;
1762
1763   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1764   if (VT == MVT::i8)
1765     LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1766   else if (VT == MVT::i16)
1767     LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1768   else if (VT == MVT::i32)
1769     LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1770   else if (VT == MVT::i64)
1771     LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1772   else if (VT == MVT::i128)
1773     LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1774   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
1775
1776   return ARMEmitLibcall(I, LC);
1777 }
1778
1779 bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
1780   EVT DestVT = TLI.getValueType(I->getType(), true);
1781
1782   // We can get here in the case when we have a binary operation on a non-legal
1783   // type and the target independent selector doesn't know how to handle it.
1784   if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1785     return false;
1786
1787   unsigned Opc;
1788   switch (ISDOpcode) {
1789     default: return false;
1790     case ISD::ADD:
1791       Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1792       break;
1793     case ISD::OR:
1794       Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1795       break;
1796     case ISD::SUB:
1797       Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1798       break;
1799   }
1800
1801   unsigned SrcReg1 = getRegForValue(I->getOperand(0));
1802   if (SrcReg1 == 0) return false;
1803
1804   // TODO: Often the 2nd operand is an immediate, which can be encoded directly
1805   // in the instruction, rather than materializing the value in a register.
1806   unsigned SrcReg2 = getRegForValue(I->getOperand(1));
1807   if (SrcReg2 == 0) return false;
1808
1809   unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
1810   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1811                           TII.get(Opc), ResultReg)
1812                   .addReg(SrcReg1).addReg(SrcReg2));
1813   UpdateValueMap(I, ResultReg);
1814   return true;
1815 }
1816
1817 bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
1818   EVT FPVT = TLI.getValueType(I->getType(), true);
1819   if (!FPVT.isSimple()) return false;
1820   MVT VT = FPVT.getSimpleVT();
1821
1822   // We can get here in the case when we want to use NEON for our fp
1823   // operations, but can't figure out how to. Just use the vfp instructions
1824   // if we have them.
1825   // FIXME: It'd be nice to use NEON instructions.
1826   Type *Ty = I->getType();
1827   bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
1828   if (isFloat && !Subtarget->hasVFP2())
1829     return false;
1830
1831   unsigned Opc;
1832   bool is64bit = VT == MVT::f64 || VT == MVT::i64;
1833   switch (ISDOpcode) {
1834     default: return false;
1835     case ISD::FADD:
1836       Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1837       break;
1838     case ISD::FSUB:
1839       Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1840       break;
1841     case ISD::FMUL:
1842       Opc = is64bit ?
ARM::VMULD : ARM::VMULS; 1843 break; 1844 } 1845 unsigned Op1 = getRegForValue(I->getOperand(0)); 1846 if (Op1 == 0) return false; 1847 1848 unsigned Op2 = getRegForValue(I->getOperand(1)); 1849 if (Op2 == 0) return false; 1850 1851 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy)); 1852 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1853 TII.get(Opc), ResultReg) 1854 .addReg(Op1).addReg(Op2)); 1855 UpdateValueMap(I, ResultReg); 1856 return true; 1857 } 1858 1859 // Call Handling Code 1860 1861 // This is largely taken directly from CCAssignFnForNode 1862 // TODO: We may not support all of this. 1863 CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, 1864 bool Return, 1865 bool isVarArg) { 1866 switch (CC) { 1867 default: 1868 llvm_unreachable("Unsupported calling convention"); 1869 case CallingConv::Fast: 1870 if (Subtarget->hasVFP2() && !isVarArg) { 1871 if (!Subtarget->isAAPCS_ABI()) 1872 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1873 // For AAPCS ABI targets, just use VFP variant of the calling convention. 1874 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1875 } 1876 // Fallthrough 1877 case CallingConv::C: 1878 // Use target triple & subtarget features to do actual dispatch. 1879 if (Subtarget->isAAPCS_ABI()) { 1880 if (Subtarget->hasVFP2() && 1881 TM.Options.FloatABIType == FloatABI::Hard && !isVarArg) 1882 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1883 else 1884 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1885 } else 1886 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1887 case CallingConv::ARM_AAPCS_VFP: 1888 if (!isVarArg) 1889 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1890 // Fall through to soft float variant, variadic functions don't 1891 // use hard floating point ABI. 1892 case CallingConv::ARM_AAPCS: 1893 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1894 case CallingConv::ARM_APCS: 1895 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1896 case CallingConv::GHC: 1897 if (Return) 1898 llvm_unreachable("Can't return in GHC call convention"); 1899 else 1900 return CC_ARM_APCS_GHC; 1901 } 1902 } 1903 1904 bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, 1905 SmallVectorImpl<unsigned> &ArgRegs, 1906 SmallVectorImpl<MVT> &ArgVTs, 1907 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 1908 SmallVectorImpl<unsigned> &RegArgs, 1909 CallingConv::ID CC, 1910 unsigned &NumBytes, 1911 bool isVarArg) { 1912 SmallVector<CCValAssign, 16> ArgLocs; 1913 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context); 1914 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, 1915 CCAssignFnForCall(CC, false, isVarArg)); 1916 1917 // Check that we can handle all of the arguments. If we can't, then bail out 1918 // now before we add code to the MBB. 1919 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1920 CCValAssign &VA = ArgLocs[i]; 1921 MVT ArgVT = ArgVTs[VA.getValNo()]; 1922 1923 // We don't handle NEON/vector parameters yet. 1924 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64) 1925 return false; 1926 1927 // Now copy/store arg to correct locations. 1928 if (VA.isRegLoc() && !VA.needsCustom()) { 1929 continue; 1930 } else if (VA.needsCustom()) { 1931 // TODO: We need custom lowering for vector (v2f64) args. 1932 if (VA.getLocVT() != MVT::f64 || 1933 // TODO: Only handle register args for now. 
1934           !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
1935         return false;
1936     } else {
1937       switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
1938       default:
1939         return false;
1940       case MVT::i1:
1941       case MVT::i8:
1942       case MVT::i16:
1943       case MVT::i32:
1944         break;
1945       case MVT::f32:
1946         if (!Subtarget->hasVFP2())
1947           return false;
1948         break;
1949       case MVT::f64:
1950         if (!Subtarget->hasVFP2())
1951           return false;
1952         break;
1953       }
1954     }
1955   }
1956
1957   // At this point, we are able to handle the call's arguments in fast isel.
1958
1959   // Get a count of how many bytes are to be pushed on the stack.
1960   NumBytes = CCInfo.getNextStackOffset();
1961
1962   // Issue CALLSEQ_START
1963   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
1964   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1965                           TII.get(AdjStackDown))
1966                   .addImm(NumBytes));
1967
1968   // Process the args.
1969   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1970     CCValAssign &VA = ArgLocs[i];
1971     unsigned Arg = ArgRegs[VA.getValNo()];
1972     MVT ArgVT = ArgVTs[VA.getValNo()];
1973
1974     assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
1975            "We don't handle NEON/vector parameters yet.");
1976
1977     // Handle arg promotion, etc.
1978     switch (VA.getLocInfo()) {
1979       case CCValAssign::Full: break;
1980       case CCValAssign::SExt: {
1981         MVT DestVT = VA.getLocVT();
1982         Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
1983         assert (Arg != 0 && "Failed to emit a sext");
1984         ArgVT = DestVT;
1985         break;
1986       }
1987       case CCValAssign::AExt:
1988         // Intentional fall-through. Handle AExt and ZExt.
1989       case CCValAssign::ZExt: {
1990         MVT DestVT = VA.getLocVT();
1991         Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
1992         assert (Arg != 0 && "Failed to emit a zext");
1993         ArgVT = DestVT;
1994         break;
1995       }
1996       case CCValAssign::BCvt: {
1997         unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
1998                                  /*TODO: Kill=*/false);
1999         assert(BC != 0 && "Failed to emit a bitcast!");
2000         Arg = BC;
2001         ArgVT = VA.getLocVT();
2002         break;
2003       }
2004       default: llvm_unreachable("Unknown arg promotion!");
2005     }
2006
2007     // Now copy/store arg to correct locations.
2008     if (VA.isRegLoc() && !VA.needsCustom()) {
2009       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
2010               VA.getLocReg())
2011         .addReg(Arg);
2012       RegArgs.push_back(VA.getLocReg());
2013     } else if (VA.needsCustom()) {
2014       // TODO: We need custom lowering for vector (v2f64) args.
2015       assert(VA.getLocVT() == MVT::f64 &&
2016              "Custom lowering for v2f64 args not available");
2017
2018       CCValAssign &NextVA = ArgLocs[++i];
2019
2020       assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2021              "We only handle register args!");
2022
2023       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
2024                               TII.get(ARM::VMOVRRD), VA.getLocReg())
2025                       .addReg(NextVA.getLocReg(), RegState::Define)
2026                       .addReg(Arg));
2027       RegArgs.push_back(VA.getLocReg());
2028       RegArgs.push_back(NextVA.getLocReg());
2029     } else {
2030       assert(VA.isMemLoc());
2031       // Need to store on the stack.
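      // The slot is addressed relative to SP after the CALLSEQ_START
      // adjustment above, so an i32 argument typically ends up as something
      // like "str rN, [sp, #offset]" (a VSTR for f32/f64); the exact opcode
      // is chosen by ARMEmitStore.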
2032 Address Addr; 2033 Addr.BaseType = Address::RegBase; 2034 Addr.Base.Reg = ARM::SP; 2035 Addr.Offset = VA.getLocMemOffset(); 2036 2037 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet; 2038 assert(EmitRet && "Could not emit a store for argument!"); 2039 } 2040 } 2041 2042 return true; 2043 } 2044 2045 bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 2046 const Instruction *I, CallingConv::ID CC, 2047 unsigned &NumBytes, bool isVarArg) { 2048 // Issue CALLSEQ_END 2049 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 2050 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2051 TII.get(AdjStackUp)) 2052 .addImm(NumBytes).addImm(0)); 2053 2054 // Now the return value. 2055 if (RetVT != MVT::isVoid) { 2056 SmallVector<CCValAssign, 16> RVLocs; 2057 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); 2058 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2059 2060 // Copy all of the result registers out of their specified physreg. 2061 if (RVLocs.size() == 2 && RetVT == MVT::f64) { 2062 // For this move we copy into two registers and then move into the 2063 // double fp reg we want. 2064 MVT DestVT = RVLocs[0].getValVT(); 2065 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); 2066 unsigned ResultReg = createResultReg(DstRC); 2067 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2068 TII.get(ARM::VMOVDRR), ResultReg) 2069 .addReg(RVLocs[0].getLocReg()) 2070 .addReg(RVLocs[1].getLocReg())); 2071 2072 UsedRegs.push_back(RVLocs[0].getLocReg()); 2073 UsedRegs.push_back(RVLocs[1].getLocReg()); 2074 2075 // Finally update the result. 2076 UpdateValueMap(I, ResultReg); 2077 } else { 2078 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!"); 2079 MVT CopyVT = RVLocs[0].getValVT(); 2080 2081 // Special handling for extended integers. 2082 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16) 2083 CopyVT = MVT::i32; 2084 2085 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); 2086 2087 unsigned ResultReg = createResultReg(DstRC); 2088 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2089 ResultReg).addReg(RVLocs[0].getLocReg()); 2090 UsedRegs.push_back(RVLocs[0].getLocReg()); 2091 2092 // Finally update the result. 2093 UpdateValueMap(I, ResultReg); 2094 } 2095 } 2096 2097 return true; 2098 } 2099 2100 bool ARMFastISel::SelectRet(const Instruction *I) { 2101 const ReturnInst *Ret = cast<ReturnInst>(I); 2102 const Function &F = *I->getParent()->getParent(); 2103 2104 if (!FuncInfo.CanLowerReturn) 2105 return false; 2106 2107 // Build a list of return value registers. 2108 SmallVector<unsigned, 4> RetRegs; 2109 2110 CallingConv::ID CC = F.getCallingConv(); 2111 if (Ret->getNumOperands() > 0) { 2112 SmallVector<ISD::OutputArg, 4> Outs; 2113 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI); 2114 2115 // Analyze operands of the call, assigning locations to each operand. 2116 SmallVector<CCValAssign, 16> ValLocs; 2117 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext()); 2118 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */, 2119 F.isVarArg())); 2120 2121 const Value *RV = Ret->getOperand(0); 2122 unsigned Reg = getRegForValue(RV); 2123 if (Reg == 0) 2124 return false; 2125 2126 // Only handle a single return value for now. 2127 if (ValLocs.size() != 1) 2128 return false; 2129 2130 CCValAssign &VA = ValLocs[0]; 2131 2132 // Don't bother handling odd stuff for now. 
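    // "Odd stuff" means a return value whose location info is anything other
    // than CCValAssign::Full (i.e. it would need promotion or a bitcast on
    // the way out) or that is not assigned to a register; both cases simply
    // bail out below and are left to SelectionDAG.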
2133 if (VA.getLocInfo() != CCValAssign::Full) 2134 return false; 2135 // Only handle register returns for now. 2136 if (!VA.isRegLoc()) 2137 return false; 2138 2139 unsigned SrcReg = Reg + VA.getValNo(); 2140 EVT RVEVT = TLI.getValueType(RV->getType()); 2141 if (!RVEVT.isSimple()) return false; 2142 MVT RVVT = RVEVT.getSimpleVT(); 2143 MVT DestVT = VA.getValVT(); 2144 // Special handling for extended integers. 2145 if (RVVT != DestVT) { 2146 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) 2147 return false; 2148 2149 assert(DestVT == MVT::i32 && "ARM should always ext to i32"); 2150 2151 // Perform extension if flagged as either zext or sext. Otherwise, do 2152 // nothing. 2153 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) { 2154 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt()); 2155 if (SrcReg == 0) return false; 2156 } 2157 } 2158 2159 // Make the copy. 2160 unsigned DstReg = VA.getLocReg(); 2161 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); 2162 // Avoid a cross-class copy. This is very unlikely. 2163 if (!SrcRC->contains(DstReg)) 2164 return false; 2165 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2166 DstReg).addReg(SrcReg); 2167 2168 // Add register to return instruction. 2169 RetRegs.push_back(VA.getLocReg()); 2170 } 2171 2172 unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET; 2173 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2174 TII.get(RetOpc)); 2175 AddOptionalDefs(MIB); 2176 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) 2177 MIB.addReg(RetRegs[i], RegState::Implicit); 2178 return true; 2179 } 2180 2181 unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) { 2182 if (UseReg) 2183 return isThumb2 ? ARM::tBLXr : ARM::BLX; 2184 else 2185 return isThumb2 ? ARM::tBL : ARM::BL; 2186 } 2187 2188 unsigned ARMFastISel::getLibcallReg(const Twine &Name) { 2189 GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false, 2190 GlobalValue::ExternalLinkage, 0, Name); 2191 EVT LCREVT = TLI.getValueType(GV->getType()); 2192 if (!LCREVT.isSimple()) return 0; 2193 return ARMMaterializeGV(GV, LCREVT.getSimpleVT()); 2194 } 2195 2196 // A quick function that will emit a call for a named libcall in F with the 2197 // vector of passed arguments for the Instruction in I. We can assume that we 2198 // can emit a call for any libcall we can produce. This is an abridged version 2199 // of the full call infrastructure since we won't need to worry about things 2200 // like computed function pointers or strange arguments at call sites. 2201 // TODO: Try to unify this and the normal call bits for ARM, then try to unify 2202 // with X86. 2203 bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { 2204 CallingConv::ID CC = TLI.getLibcallCallingConv(Call); 2205 2206 // Handle *simple* calls for now. 2207 Type *RetTy = I->getType(); 2208 MVT RetVT; 2209 if (RetTy->isVoidTy()) 2210 RetVT = MVT::isVoid; 2211 else if (!isTypeLegal(RetTy, RetVT)) 2212 return false; 2213 2214 // Can't handle non-double multi-reg retvals. 2215 if (RetVT != MVT::isVoid && RetVT != MVT::i32) { 2216 SmallVector<CCValAssign, 16> RVLocs; 2217 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context); 2218 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false)); 2219 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2220 return false; 2221 } 2222 2223 // Set up the argument vectors. 
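  // The libcall's arguments are simply the original instruction's operands;
  // e.g. for a 32-bit sdiv both i32 operands become register arguments to
  // whatever routine TLI.getLibcallName() reports (typically __divsi3 or an
  // AEABI name such as __aeabi_idiv, depending on the target).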
2224 SmallVector<Value*, 8> Args; 2225 SmallVector<unsigned, 8> ArgRegs; 2226 SmallVector<MVT, 8> ArgVTs; 2227 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2228 Args.reserve(I->getNumOperands()); 2229 ArgRegs.reserve(I->getNumOperands()); 2230 ArgVTs.reserve(I->getNumOperands()); 2231 ArgFlags.reserve(I->getNumOperands()); 2232 for (unsigned i = 0; i < I->getNumOperands(); ++i) { 2233 Value *Op = I->getOperand(i); 2234 unsigned Arg = getRegForValue(Op); 2235 if (Arg == 0) return false; 2236 2237 Type *ArgTy = Op->getType(); 2238 MVT ArgVT; 2239 if (!isTypeLegal(ArgTy, ArgVT)) return false; 2240 2241 ISD::ArgFlagsTy Flags; 2242 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); 2243 Flags.setOrigAlign(OriginalAlignment); 2244 2245 Args.push_back(Op); 2246 ArgRegs.push_back(Arg); 2247 ArgVTs.push_back(ArgVT); 2248 ArgFlags.push_back(Flags); 2249 } 2250 2251 // Handle the arguments now that we've gotten them. 2252 SmallVector<unsigned, 4> RegArgs; 2253 unsigned NumBytes; 2254 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2255 RegArgs, CC, NumBytes, false)) 2256 return false; 2257 2258 unsigned CalleeReg = 0; 2259 if (EnableARMLongCalls) { 2260 CalleeReg = getLibcallReg(TLI.getLibcallName(Call)); 2261 if (CalleeReg == 0) return false; 2262 } 2263 2264 // Issue the call. 2265 unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls); 2266 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2267 DL, TII.get(CallOpc)); 2268 // BL / BLX don't take a predicate, but tBL / tBLX do. 2269 if (isThumb2) 2270 AddDefaultPred(MIB); 2271 if (EnableARMLongCalls) 2272 MIB.addReg(CalleeReg); 2273 else 2274 MIB.addExternalSymbol(TLI.getLibcallName(Call)); 2275 2276 // Add implicit physical register uses to the call. 2277 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2278 MIB.addReg(RegArgs[i], RegState::Implicit); 2279 2280 // Add a register mask with the call-preserved registers. 2281 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2282 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2283 2284 // Finish off the call including any return values. 2285 SmallVector<unsigned, 4> UsedRegs; 2286 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false; 2287 2288 // Set all unused physreg defs as dead. 2289 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2290 2291 return true; 2292 } 2293 2294 bool ARMFastISel::SelectCall(const Instruction *I, 2295 const char *IntrMemName = 0) { 2296 const CallInst *CI = cast<CallInst>(I); 2297 const Value *Callee = CI->getCalledValue(); 2298 2299 // Can't handle inline asm. 2300 if (isa<InlineAsm>(Callee)) return false; 2301 2302 // Allow SelectionDAG isel to handle tail calls. 2303 if (CI->isTailCall()) return false; 2304 2305 // Check the calling convention. 2306 ImmutableCallSite CS(CI); 2307 CallingConv::ID CC = CS.getCallingConv(); 2308 2309 // TODO: Avoid some calling conventions? 2310 2311 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); 2312 FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 2313 bool isVarArg = FTy->isVarArg(); 2314 2315 // Handle *simple* calls for now. 2316 Type *RetTy = I->getType(); 2317 MVT RetVT; 2318 if (RetTy->isVoidTy()) 2319 RetVT = MVT::isVoid; 2320 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 && 2321 RetVT != MVT::i8 && RetVT != MVT::i1) 2322 return false; 2323 2324 // Can't handle non-double multi-reg retvals. 
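  // i1/i8/i16 results come back extended in a single register, and an f64
  // returned in two GPRs (soft-float ABIs) is reassembled with VMOVDRR in
  // FinishCall; any other return type needing more than one location is
  // rejected here.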
2325 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 && 2326 RetVT != MVT::i16 && RetVT != MVT::i32) { 2327 SmallVector<CCValAssign, 16> RVLocs; 2328 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); 2329 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2330 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2331 return false; 2332 } 2333 2334 // Set up the argument vectors. 2335 SmallVector<Value*, 8> Args; 2336 SmallVector<unsigned, 8> ArgRegs; 2337 SmallVector<MVT, 8> ArgVTs; 2338 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2339 unsigned arg_size = CS.arg_size(); 2340 Args.reserve(arg_size); 2341 ArgRegs.reserve(arg_size); 2342 ArgVTs.reserve(arg_size); 2343 ArgFlags.reserve(arg_size); 2344 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 2345 i != e; ++i) { 2346 // If we're lowering a memory intrinsic instead of a regular call, skip the 2347 // last two arguments, which shouldn't be passed to the underlying function. 2348 if (IntrMemName && e-i <= 2) 2349 break; 2350 2351 ISD::ArgFlagsTy Flags; 2352 unsigned AttrInd = i - CS.arg_begin() + 1; 2353 if (CS.paramHasAttr(AttrInd, Attribute::SExt)) 2354 Flags.setSExt(); 2355 if (CS.paramHasAttr(AttrInd, Attribute::ZExt)) 2356 Flags.setZExt(); 2357 2358 // FIXME: Only handle *easy* calls for now. 2359 if (CS.paramHasAttr(AttrInd, Attribute::InReg) || 2360 CS.paramHasAttr(AttrInd, Attribute::StructRet) || 2361 CS.paramHasAttr(AttrInd, Attribute::Nest) || 2362 CS.paramHasAttr(AttrInd, Attribute::ByVal)) 2363 return false; 2364 2365 Type *ArgTy = (*i)->getType(); 2366 MVT ArgVT; 2367 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 && 2368 ArgVT != MVT::i1) 2369 return false; 2370 2371 unsigned Arg = getRegForValue(*i); 2372 if (Arg == 0) 2373 return false; 2374 2375 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); 2376 Flags.setOrigAlign(OriginalAlignment); 2377 2378 Args.push_back(*i); 2379 ArgRegs.push_back(Arg); 2380 ArgVTs.push_back(ArgVT); 2381 ArgFlags.push_back(Flags); 2382 } 2383 2384 // Handle the arguments now that we've gotten them. 2385 SmallVector<unsigned, 4> RegArgs; 2386 unsigned NumBytes; 2387 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2388 RegArgs, CC, NumBytes, isVarArg)) 2389 return false; 2390 2391 bool UseReg = false; 2392 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee); 2393 if (!GV || EnableARMLongCalls) UseReg = true; 2394 2395 unsigned CalleeReg = 0; 2396 if (UseReg) { 2397 if (IntrMemName) 2398 CalleeReg = getLibcallReg(IntrMemName); 2399 else 2400 CalleeReg = getRegForValue(Callee); 2401 2402 if (CalleeReg == 0) return false; 2403 } 2404 2405 // Issue the call. 2406 unsigned CallOpc = ARMSelectCallOp(UseReg); 2407 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2408 DL, TII.get(CallOpc)); 2409 2410 // ARM calls don't take a predicate, but tBL / tBLX do. 2411 if(isThumb2) 2412 AddDefaultPred(MIB); 2413 if (UseReg) 2414 MIB.addReg(CalleeReg); 2415 else if (!IntrMemName) 2416 MIB.addGlobalAddress(GV, 0, 0); 2417 else 2418 MIB.addExternalSymbol(IntrMemName, 0); 2419 2420 // Add implicit physical register uses to the call. 2421 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2422 MIB.addReg(RegArgs[i], RegState::Implicit); 2423 2424 // Add a register mask with the call-preserved registers. 2425 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 
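  // Registers not covered by the mask are treated as clobbered by the call,
  // which is what forces the register allocator to keep values that live
  // across the call out of those registers.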
2426 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2427 2428 // Finish off the call including any return values. 2429 SmallVector<unsigned, 4> UsedRegs; 2430 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg)) 2431 return false; 2432 2433 // Set all unused physreg defs as dead. 2434 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2435 2436 return true; 2437 } 2438 2439 bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) { 2440 return Len <= 16; 2441 } 2442 2443 bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, 2444 uint64_t Len, unsigned Alignment) { 2445 // Make sure we don't bloat code by inlining very large memcpy's. 2446 if (!ARMIsMemCpySmall(Len)) 2447 return false; 2448 2449 while (Len) { 2450 MVT VT; 2451 if (!Alignment || Alignment >= 4) { 2452 if (Len >= 4) 2453 VT = MVT::i32; 2454 else if (Len >= 2) 2455 VT = MVT::i16; 2456 else { 2457 assert (Len == 1 && "Expected a length of 1!"); 2458 VT = MVT::i8; 2459 } 2460 } else { 2461 // Bound based on alignment. 2462 if (Len >= 2 && Alignment == 2) 2463 VT = MVT::i16; 2464 else { 2465 VT = MVT::i8; 2466 } 2467 } 2468 2469 bool RV; 2470 unsigned ResultReg; 2471 RV = ARMEmitLoad(VT, ResultReg, Src); 2472 assert (RV == true && "Should be able to handle this load."); 2473 RV = ARMEmitStore(VT, ResultReg, Dest); 2474 assert (RV == true && "Should be able to handle this store."); 2475 (void)RV; 2476 2477 unsigned Size = VT.getSizeInBits()/8; 2478 Len -= Size; 2479 Dest.Offset += Size; 2480 Src.Offset += Size; 2481 } 2482 2483 return true; 2484 } 2485 2486 bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { 2487 // FIXME: Handle more intrinsics. 2488 switch (I.getIntrinsicID()) { 2489 default: return false; 2490 case Intrinsic::frameaddress: { 2491 MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo(); 2492 MFI->setFrameAddressIsTaken(true); 2493 2494 unsigned LdrOpc; 2495 const TargetRegisterClass *RC; 2496 if (isThumb2) { 2497 LdrOpc = ARM::t2LDRi12; 2498 RC = (const TargetRegisterClass*)&ARM::tGPRRegClass; 2499 } else { 2500 LdrOpc = ARM::LDRi12; 2501 RC = (const TargetRegisterClass*)&ARM::GPRRegClass; 2502 } 2503 2504 const ARMBaseRegisterInfo *RegInfo = 2505 static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo()); 2506 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF)); 2507 unsigned SrcReg = FramePtr; 2508 2509 // Recursively load frame address 2510 // ldr r0 [fp] 2511 // ldr r0 [r0] 2512 // ldr r0 [r0] 2513 // ... 2514 unsigned DestReg; 2515 unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue(); 2516 while (Depth--) { 2517 DestReg = createResultReg(RC); 2518 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2519 TII.get(LdrOpc), DestReg) 2520 .addReg(SrcReg).addImm(0)); 2521 SrcReg = DestReg; 2522 } 2523 UpdateValueMap(&I, SrcReg); 2524 return true; 2525 } 2526 case Intrinsic::memcpy: 2527 case Intrinsic::memmove: { 2528 const MemTransferInst &MTI = cast<MemTransferInst>(I); 2529 // Don't handle volatile. 2530 if (MTI.isVolatile()) 2531 return false; 2532 2533 // Disable inlining for memmove before calls to ComputeAddress. Otherwise, 2534 // we would emit dead code because we don't currently handle memmoves. 2535 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy); 2536 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) { 2537 // Small memcpy's are common enough that we want to do them without a call 2538 // if possible. 
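      // For example, a 4-byte-aligned 8-byte copy becomes roughly two ldr/str
      // pairs via ARMTryEmitSmallMemCpy instead of a call to memcpy.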
2539 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue(); 2540 if (ARMIsMemCpySmall(Len)) { 2541 Address Dest, Src; 2542 if (!ARMComputeAddress(MTI.getRawDest(), Dest) || 2543 !ARMComputeAddress(MTI.getRawSource(), Src)) 2544 return false; 2545 unsigned Alignment = MTI.getAlignment(); 2546 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment)) 2547 return true; 2548 } 2549 } 2550 2551 if (!MTI.getLength()->getType()->isIntegerTy(32)) 2552 return false; 2553 2554 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) 2555 return false; 2556 2557 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove"; 2558 return SelectCall(&I, IntrMemName); 2559 } 2560 case Intrinsic::memset: { 2561 const MemSetInst &MSI = cast<MemSetInst>(I); 2562 // Don't handle volatile. 2563 if (MSI.isVolatile()) 2564 return false; 2565 2566 if (!MSI.getLength()->getType()->isIntegerTy(32)) 2567 return false; 2568 2569 if (MSI.getDestAddressSpace() > 255) 2570 return false; 2571 2572 return SelectCall(&I, "memset"); 2573 } 2574 case Intrinsic::trap: { 2575 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get( 2576 Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP)); 2577 return true; 2578 } 2579 } 2580 } 2581 2582 bool ARMFastISel::SelectTrunc(const Instruction *I) { 2583 // The high bits for a type smaller than the register size are assumed to be 2584 // undefined. 2585 Value *Op = I->getOperand(0); 2586 2587 EVT SrcVT, DestVT; 2588 SrcVT = TLI.getValueType(Op->getType(), true); 2589 DestVT = TLI.getValueType(I->getType(), true); 2590 2591 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 2592 return false; 2593 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 2594 return false; 2595 2596 unsigned SrcReg = getRegForValue(Op); 2597 if (!SrcReg) return false; 2598 2599 // Because the high bits are undefined, a truncate doesn't generate 2600 // any code. 2601 UpdateValueMap(I, SrcReg); 2602 return true; 2603 } 2604 2605 unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, 2606 bool isZExt) { 2607 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8) 2608 return 0; 2609 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1) 2610 return 0; 2611 2612 // Table of which combinations can be emitted as a single instruction, 2613 // and which will require two. 2614 static const uint8_t isSingleInstrTbl[3][2][2][2] = { 2615 // ARM Thumb 2616 // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops 2617 // ext: s z s z s z s z 2618 /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } }, 2619 /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }, 2620 /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } } 2621 }; 2622 2623 // Target registers for: 2624 // - For ARM can never be PC. 2625 // - For 16-bit Thumb are restricted to lower 8 registers. 2626 // - For 32-bit Thumb are restricted to non-SP and non-PC. 2627 static const TargetRegisterClass *RCTbl[2][2] = { 2628 // Instructions: Two Single 2629 /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass }, 2630 /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass } 2631 }; 2632 2633 // Table governing the instruction(s) to be emitted. 2634 static const struct { 2635 // First entry for each of the following is sext, second zext. 2636 uint16_t Opc[2]; 2637 uint8_t Imm[2]; // All instructions have either a shift or a mask. 2638 uint8_t hasS[2]; // Some instructions have an S bit, always set it to 0. 
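    // The table is indexed as OpcTbl[isSingleInstr][isThumb2][Bitness], where
    // Bitness maps the source width {1,8,16} to {0,1,2}; the Opc/Imm/hasS
    // pairs are then selected by isZExt.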
2639 } OpcTbl[2][2][3] = { 2640 { // Two instructions (first is left shift, second is in this table). 2641 { // ARM 2642 /* 1 */ { { ARM::ASRi, ARM::LSRi }, { 31, 31 }, { 1, 1 } }, 2643 /* 8 */ { { ARM::ASRi, ARM::LSRi }, { 24, 24 }, { 1, 1 } }, 2644 /* 16 */ { { ARM::ASRi, ARM::LSRi }, { 16, 16 }, { 1, 1 } } 2645 }, 2646 { // Thumb 2647 /* 1 */ { { ARM::tASRri, ARM::tLSRri }, { 31, 31 }, { 0, 0 } }, 2648 /* 8 */ { { ARM::tASRri, ARM::tLSRri }, { 24, 24 }, { 0, 0 } }, 2649 /* 16 */ { { ARM::tASRri, ARM::tLSRri }, { 16, 16 }, { 0, 0 } } 2650 } 2651 }, 2652 { // Single instruction. 2653 { // ARM 2654 /* 1 */ { { ARM::KILL, ARM::ANDri }, { 0, 1 }, { 0, 1 } }, 2655 /* 8 */ { { ARM::SXTB, ARM::ANDri }, { 0, 255 }, { 0, 1 } }, 2656 /* 16 */ { { ARM::SXTH, ARM::UXTH }, { 0, 0 }, { 0, 0 } } 2657 }, 2658 { // Thumb 2659 /* 1 */ { { ARM::KILL, ARM::t2ANDri }, { 0, 1 }, { 0, 1 } }, 2660 /* 8 */ { { ARM::t2SXTB, ARM::t2ANDri }, { 0, 255 }, { 0, 1 } }, 2661 /* 16 */ { { ARM::t2SXTH, ARM::t2UXTH }, { 0, 0 }, { 0, 0 } } 2662 } 2663 } 2664 }; 2665 2666 unsigned SrcBits = SrcVT.getSizeInBits(); 2667 unsigned DestBits = DestVT.getSizeInBits(); 2668 (void) DestBits; 2669 assert((SrcBits < DestBits) && "can only extend to larger types"); 2670 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) && 2671 "other sizes unimplemented"); 2672 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) && 2673 "other sizes unimplemented"); 2674 2675 bool hasV6Ops = Subtarget->hasV6Ops(); 2676 unsigned Bitness = countTrailingZeros(SrcBits) >> 1; // {1,8,16}=>{0,1,2} 2677 assert((Bitness < 3) && "sanity-check table bounds"); 2678 2679 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt]; 2680 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr]; 2681 unsigned Opc = OpcTbl[isSingleInstr][isThumb2][Bitness].Opc[isZExt]; 2682 assert(ARM::KILL != Opc && "Invalid table entry"); 2683 unsigned Imm = OpcTbl[isSingleInstr][isThumb2][Bitness].Imm[isZExt]; 2684 unsigned hasS = OpcTbl[isSingleInstr][isThumb2][Bitness].hasS[isZExt]; 2685 2686 // 16-bit Thumb instructions always set CPSR (unless they're in an IT block). 2687 bool setsCPSR = &ARM::tGPRRegClass == RC; 2688 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::LSLi; 2689 unsigned ResultReg; 2690 2691 // Either one or two instructions are emitted. 2692 // They're always of the form: 2693 // dst = in OP imm 2694 // CPSR is set only by 16-bit Thumb instructions. 2695 // Predicate, if any, is AL. 2696 // S bit, if available, is always 0. 2697 // When two are emitted the first's result will feed as the second's input, 2698 // that value is then dead. 2699 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2; 2700 for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) { 2701 ResultReg = createResultReg(RC); 2702 unsigned Opcode = ((0 == Instr) && !isSingleInstr) ? LSLOpc : Opc; 2703 bool isKill = 1 == Instr; 2704 MachineInstrBuilder MIB = BuildMI( 2705 *FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opcode), ResultReg); 2706 if (setsCPSR) 2707 MIB.addReg(ARM::CPSR, RegState::Define); 2708 AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(Imm)); 2709 if (hasS) 2710 AddDefaultCC(MIB); 2711 // Second instruction consumes the first's result. 2712 SrcReg = ResultReg; 2713 } 2714 2715 return ResultReg; 2716 } 2717 2718 bool ARMFastISel::SelectIntExt(const Instruction *I) { 2719 // On ARM, in general, integer casts don't involve legal types; this code 2720 // handles promotable integers. 
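  // For example, "sext i8 %x to i32" becomes a single SXTB on v6+ targets (an
  // LSL/ASR #24 pair otherwise), while "zext i8 %x to i32" becomes an AND
  // with 255, per the tables in ARMEmitIntExt above.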
2721 Type *DestTy = I->getType(); 2722 Value *Src = I->getOperand(0); 2723 Type *SrcTy = Src->getType(); 2724 2725 bool isZExt = isa<ZExtInst>(I); 2726 unsigned SrcReg = getRegForValue(Src); 2727 if (!SrcReg) return false; 2728 2729 EVT SrcEVT, DestEVT; 2730 SrcEVT = TLI.getValueType(SrcTy, true); 2731 DestEVT = TLI.getValueType(DestTy, true); 2732 if (!SrcEVT.isSimple()) return false; 2733 if (!DestEVT.isSimple()) return false; 2734 2735 MVT SrcVT = SrcEVT.getSimpleVT(); 2736 MVT DestVT = DestEVT.getSimpleVT(); 2737 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt); 2738 if (ResultReg == 0) return false; 2739 UpdateValueMap(I, ResultReg); 2740 return true; 2741 } 2742 2743 bool ARMFastISel::SelectShift(const Instruction *I, 2744 ARM_AM::ShiftOpc ShiftTy) { 2745 // We handle thumb2 mode by target independent selector 2746 // or SelectionDAG ISel. 2747 if (isThumb2) 2748 return false; 2749 2750 // Only handle i32 now. 2751 EVT DestVT = TLI.getValueType(I->getType(), true); 2752 if (DestVT != MVT::i32) 2753 return false; 2754 2755 unsigned Opc = ARM::MOVsr; 2756 unsigned ShiftImm; 2757 Value *Src2Value = I->getOperand(1); 2758 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) { 2759 ShiftImm = CI->getZExtValue(); 2760 2761 // Fall back to selection DAG isel if the shift amount 2762 // is zero or greater than the width of the value type. 2763 if (ShiftImm == 0 || ShiftImm >=32) 2764 return false; 2765 2766 Opc = ARM::MOVsi; 2767 } 2768 2769 Value *Src1Value = I->getOperand(0); 2770 unsigned Reg1 = getRegForValue(Src1Value); 2771 if (Reg1 == 0) return false; 2772 2773 unsigned Reg2 = 0; 2774 if (Opc == ARM::MOVsr) { 2775 Reg2 = getRegForValue(Src2Value); 2776 if (Reg2 == 0) return false; 2777 } 2778 2779 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); 2780 if(ResultReg == 0) return false; 2781 2782 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2783 TII.get(Opc), ResultReg) 2784 .addReg(Reg1); 2785 2786 if (Opc == ARM::MOVsi) 2787 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm)); 2788 else if (Opc == ARM::MOVsr) { 2789 MIB.addReg(Reg2); 2790 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0)); 2791 } 2792 2793 AddOptionalDefs(MIB); 2794 UpdateValueMap(I, ResultReg); 2795 return true; 2796 } 2797 2798 // TODO: SoftFP support. 
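// Dispatch on the IR opcode; any case that returns false leaves the
// instruction to the generic FastISel tables and, failing that, to
// SelectionDAG.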
2799 bool ARMFastISel::TargetSelectInstruction(const Instruction *I) { 2800 2801 switch (I->getOpcode()) { 2802 case Instruction::Load: 2803 return SelectLoad(I); 2804 case Instruction::Store: 2805 return SelectStore(I); 2806 case Instruction::Br: 2807 return SelectBranch(I); 2808 case Instruction::IndirectBr: 2809 return SelectIndirectBr(I); 2810 case Instruction::ICmp: 2811 case Instruction::FCmp: 2812 return SelectCmp(I); 2813 case Instruction::FPExt: 2814 return SelectFPExt(I); 2815 case Instruction::FPTrunc: 2816 return SelectFPTrunc(I); 2817 case Instruction::SIToFP: 2818 return SelectIToFP(I, /*isSigned*/ true); 2819 case Instruction::UIToFP: 2820 return SelectIToFP(I, /*isSigned*/ false); 2821 case Instruction::FPToSI: 2822 return SelectFPToI(I, /*isSigned*/ true); 2823 case Instruction::FPToUI: 2824 return SelectFPToI(I, /*isSigned*/ false); 2825 case Instruction::Add: 2826 return SelectBinaryIntOp(I, ISD::ADD); 2827 case Instruction::Or: 2828 return SelectBinaryIntOp(I, ISD::OR); 2829 case Instruction::Sub: 2830 return SelectBinaryIntOp(I, ISD::SUB); 2831 case Instruction::FAdd: 2832 return SelectBinaryFPOp(I, ISD::FADD); 2833 case Instruction::FSub: 2834 return SelectBinaryFPOp(I, ISD::FSUB); 2835 case Instruction::FMul: 2836 return SelectBinaryFPOp(I, ISD::FMUL); 2837 case Instruction::SDiv: 2838 return SelectDiv(I, /*isSigned*/ true); 2839 case Instruction::UDiv: 2840 return SelectDiv(I, /*isSigned*/ false); 2841 case Instruction::SRem: 2842 return SelectRem(I, /*isSigned*/ true); 2843 case Instruction::URem: 2844 return SelectRem(I, /*isSigned*/ false); 2845 case Instruction::Call: 2846 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2847 return SelectIntrinsicCall(*II); 2848 return SelectCall(I); 2849 case Instruction::Select: 2850 return SelectSelect(I); 2851 case Instruction::Ret: 2852 return SelectRet(I); 2853 case Instruction::Trunc: 2854 return SelectTrunc(I); 2855 case Instruction::ZExt: 2856 case Instruction::SExt: 2857 return SelectIntExt(I); 2858 case Instruction::Shl: 2859 return SelectShift(I, ARM_AM::lsl); 2860 case Instruction::LShr: 2861 return SelectShift(I, ARM_AM::lsr); 2862 case Instruction::AShr: 2863 return SelectShift(I, ARM_AM::asr); 2864 default: break; 2865 } 2866 return false; 2867 } 2868 2869 namespace { 2870 // This table describes sign- and zero-extend instructions which can be 2871 // folded into a preceding load. All of these extends have an immediate 2872 // (sometimes a mask and sometimes a shift) that's applied after 2873 // extension. 2874 const struct FoldableLoadExtendsStruct { 2875 uint16_t Opc[2]; // ARM, Thumb. 2876 uint8_t ExpectedImm; 2877 uint8_t isZExt : 1; 2878 uint8_t ExpectedVT : 7; 2879 } FoldableLoadExtends[] = { 2880 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 }, 2881 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 }, 2882 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 }, 2883 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 }, 2884 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 } 2885 }; 2886 } 2887 2888 /// \brief The specified machine instr operand is a vreg, and that 2889 /// vreg is being provided by the specified load instruction. If possible, 2890 /// try to fold the load as an operand to the instruction, returning true if 2891 /// successful. 2892 bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, 2893 const LoadInst *LI) { 2894 // Verify we have a legal type before going any further. 
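  // Beyond type legality, the fold is only applied when the extend's
  // immediate operand matches what FoldableLoadExtends expects for the loaded
  // type (e.g. an AND mask of 255 for an i8 zero-extend), since the rewritten
  // load then produces the already-extended value itself.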
2895 MVT VT; 2896 if (!isLoadTypeLegal(LI->getType(), VT)) 2897 return false; 2898 2899 // Combine load followed by zero- or sign-extend. 2900 // ldrb r1, [r0] ldrb r1, [r0] 2901 // uxtb r2, r1 => 2902 // mov r3, r2 mov r3, r1 2903 if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm()) 2904 return false; 2905 const uint64_t Imm = MI->getOperand(2).getImm(); 2906 2907 bool Found = false; 2908 bool isZExt; 2909 for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends); 2910 i != e; ++i) { 2911 if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() && 2912 (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm && 2913 MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) { 2914 Found = true; 2915 isZExt = FoldableLoadExtends[i].isZExt; 2916 } 2917 } 2918 if (!Found) return false; 2919 2920 // See if we can handle this address. 2921 Address Addr; 2922 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false; 2923 2924 unsigned ResultReg = MI->getOperand(0).getReg(); 2925 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false)) 2926 return false; 2927 MI->eraseFromParent(); 2928 return true; 2929 } 2930 2931 unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, 2932 unsigned Align, MVT VT) { 2933 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2934 ARMConstantPoolConstant *CPV = 2935 ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2936 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); 2937 2938 unsigned Opc; 2939 unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT)); 2940 // Load value. 2941 if (isThumb2) { 2942 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2943 TII.get(ARM::t2LDRpci), DestReg1) 2944 .addConstantPoolIndex(Idx)); 2945 Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs; 2946 } else { 2947 // The extra immediate is for addrmode2. 2948 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2949 DL, TII.get(ARM::LDRcp), DestReg1) 2950 .addConstantPoolIndex(Idx).addImm(0)); 2951 Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs; 2952 } 2953 2954 unsigned GlobalBaseReg = AFI->getGlobalBaseReg(); 2955 if (GlobalBaseReg == 0) { 2956 GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT)); 2957 AFI->setGlobalBaseReg(GlobalBaseReg); 2958 } 2959 2960 unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT)); 2961 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2962 DL, TII.get(Opc), DestReg2) 2963 .addReg(DestReg1) 2964 .addReg(GlobalBaseReg); 2965 if (!UseGOTOFF) 2966 MIB.addImm(0); 2967 AddOptionalDefs(MIB); 2968 2969 return DestReg2; 2970 } 2971 2972 bool ARMFastISel::FastLowerArguments() { 2973 if (!FuncInfo.CanLowerReturn) 2974 return false; 2975 2976 const Function *F = FuncInfo.Fn; 2977 if (F->isVarArg()) 2978 return false; 2979 2980 CallingConv::ID CC = F->getCallingConv(); 2981 switch (CC) { 2982 default: 2983 return false; 2984 case CallingConv::Fast: 2985 case CallingConv::C: 2986 case CallingConv::ARM_AAPCS_VFP: 2987 case CallingConv::ARM_AAPCS: 2988 case CallingConv::ARM_APCS: 2989 break; 2990 } 2991 2992 // Only handle simple cases. i.e. Up to 4 i8/i16/i32 scalar arguments 2993 // which are passed in r0 - r3. 
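  // For example, "define i32 @f(i32 %a, i32 %b)" lowers %a and %b as live-in
  // copies of r0 and r1; arguments passed on the stack, in VFP registers, or
  // split across register pairs are rejected below.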
2994 unsigned Idx = 1; 2995 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 2996 I != E; ++I, ++Idx) { 2997 if (Idx > 4) 2998 return false; 2999 3000 if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) || 3001 F->getAttributes().hasAttribute(Idx, Attribute::StructRet) || 3002 F->getAttributes().hasAttribute(Idx, Attribute::ByVal)) 3003 return false; 3004 3005 Type *ArgTy = I->getType(); 3006 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) 3007 return false; 3008 3009 EVT ArgVT = TLI.getValueType(ArgTy); 3010 if (!ArgVT.isSimple()) return false; 3011 switch (ArgVT.getSimpleVT().SimpleTy) { 3012 case MVT::i8: 3013 case MVT::i16: 3014 case MVT::i32: 3015 break; 3016 default: 3017 return false; 3018 } 3019 } 3020 3021 3022 static const uint16_t GPRArgRegs[] = { 3023 ARM::R0, ARM::R1, ARM::R2, ARM::R3 3024 }; 3025 3026 const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::i32); 3027 Idx = 0; 3028 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 3029 I != E; ++I, ++Idx) { 3030 unsigned SrcReg = GPRArgRegs[Idx]; 3031 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC); 3032 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy. 3033 // Without this, EmitLiveInCopies may eliminate the livein if its only 3034 // use is a bitcast (which isn't turned into an instruction). 3035 unsigned ResultReg = createResultReg(RC); 3036 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 3037 ResultReg).addReg(DstReg, getKillRegState(true)); 3038 UpdateValueMap(I, ResultReg); 3039 } 3040 3041 return true; 3042 } 3043 3044 namespace llvm { 3045 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo, 3046 const TargetLibraryInfo *libInfo) { 3047 const TargetMachine &TM = funcInfo.MF->getTarget(); 3048 3049 const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>(); 3050 // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl. 3051 bool UseFastISel = false; 3052 UseFastISel |= Subtarget->isTargetIOS() && !Subtarget->isThumb1Only(); 3053 UseFastISel |= Subtarget->isTargetLinux() && !Subtarget->isThumb(); 3054 UseFastISel |= Subtarget->isTargetNaCl() && !Subtarget->isThumb(); 3055 3056 if (UseFastISel) { 3057 // iOS always has a FP for backtracking, force other targets 3058 // to keep their FP when doing FastISel. The emitted code is 3059 // currently superior, and in cases like test-suite's lencod 3060 // FastISel isn't quite correct when FP is eliminated. 3061 TM.Options.NoFramePointerElim = true; 3062 return new ARMFastISel(funcInfo, libInfo); 3063 } 3064 return 0; 3065 } 3066 } 3067