//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
       Base.Reg = 0;
     }
  } Address;

class ARMFastISel final : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  Module &M;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
        : FastISel(funcInfo, libInfo),
          M(const_cast<Module &>(*funcInfo.Fn->getParent())),
          TM(funcInfo.MF->getTarget()),
          TII(*TM.getSubtargetImpl()->getInstrInfo()),
          TLI(*TM.getSubtargetImpl()->getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
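    // These helpers mirror the generic FastEmitInst_* implementations in
    // FastISel.cpp; they are re-implemented here so that every emitted
    // instruction is routed through AddOptionalDefs, which fills in the ARM
    // predicate and optional CC operands the generic code knows nothing
    // about.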
  private:
    unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);
    unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              unsigned Op2, bool Op2IsKill);
    unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              uint64_t Imm);
    unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);

    // Backend specific FastISel code.
  private:
    bool TargetSelectInstruction(const Instruction *I) override;
    unsigned TargetMaterializeConstant(const Constant *C) override;
    unsigned TargetMaterializeAlloca(const AllocaInst *AI) override;
    bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                             const LoadInst *LI) override;
    bool FastLowerArguments() override;
  private:
  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);
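    // Each routine returns true when it has fully selected the instruction,
    // and false to let the caller fall back to SelectionDAG.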
    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                               unsigned Alignment);
    unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
    unsigned ARMMaterializeInt(const Constant *C, MVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
    unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);
    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);

    const TargetLowering *getTargetLowering() {
      return TM.getSubtargetImpl()->getTargetLowering();
    }

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(MVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}
bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If this is a Thumb2 function, or the instruction is not NEON, predication
  // is handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return MI->isPredicable();

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, we know
  // we're not predicable but add it anyway.
  if (isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    // No explicit def: the result lands in the instruction's first implicit
    // def, so copy it into the virtual ResultReg.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction. (Note: this used to constrain Op1 twice and never
  // constrain Op2; the third operand must be constrained as well.)
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);
  Op2 = constrainOperandRegClass(II, Op2, 3);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill)
            .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}
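// For example, 1.0f fits the 8-bit VFP3 immediate encoding, so isFPImmLegal
// succeeds below and a single FCONSTS materializes it; 0.1f has no exact
// binary representation, so it takes the constant-pool load path instead.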
// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = DL.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
                                               &ARM::GPRRegClass;
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
                             (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool. For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = DL.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else {
    // The extra immediate is for addrmode2.
    DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));
  }

  return DestReg;
}
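// To make the cases above concrete: the i32 constant 42 becomes a single
// movw (MOVi16/t2MOVi16), -2 becomes MVN with the encodable immediate #1,
// and values fitting neither pattern are loaded from the constant pool.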
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();
  bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);

  // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;

  // Use movw+movt when possible, it avoids constant pool entries.
  // Non-darwin targets only support static movt relocations in FastISel.
  if (Subtarget->useMovt(*FuncInfo.MF) &&
      (Subtarget->isTargetMachO() || RelocM == Reloc::Static)) {
    unsigned Opc;
    unsigned char TF = 0;
    if (Subtarget->isTargetMachO())
      TF = ARMII::MO_NONLAZY;

    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = DL.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = DL.getTypeAllocSize(GV->getType());
    }

    if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                    DestReg).addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRcp), DestReg)
                .addConstantPoolIndex(Idx)
                .addImm(0);
      AddOptionalDefs(MIB);

      if (RelocM == Reloc::PIC_) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));

        MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                          DbgLoc, TII.get(Opc), NewDestReg)
                                      .addReg(DestReg)
                                      .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if (IsIndirect) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::t2LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}
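// Example: with movw/movt available and a static relocation model, the
// address of a global is materialized above as a MOVi32imm pseudo (a
// movw/movt pair) with no constant-pool entry; PIC and pre-v6t2 targets
// fall back to constant-pool loads with PC-relative fixups.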
unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}
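// An example of the GEP folding performed below: given
//   %p = getelementptr inbounds %struct.S* %s, i32 0, i32 2
//   %v = load i32* %p
// ARMComputeAddress folds the element offset into Addr.Offset, so the load
// can select as a single reg+imm LDR with no separate address add.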
// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = DL.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (canFoldAddIntoGEP(U, Op)) {
              // A compatible add with a constant operand. Fold the constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }
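  // needsLowering is now set when the offset cannot be encoded in the chosen
  // addressing mode; e.g. an i32 access at offset 4096 overflows the 12-bit
  // immediate of LDRi12/STRi12 and must be folded into the base register
  // below.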
  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ?
      (const TargetRegisterClass*)&ARM::tGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction, fold the
  // reg+offset into a new base register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 expects the offset to be divided by 4 (SelectionDAG divides it
  // and the instruction later multiplies it back); do the same here.
  if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
          MachinePointerInfo::getFixedStack(FI, Offset),
          Flags,
          MFI.getObjectSize(FI),
          MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load.  Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ?
        (const TargetRegisterClass*)&ARM::tGPRRegClass :
        (const TargetRegisterClass*)&ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
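      // The i1 case falls through: the value was masked down to a single bit
      // with AND #1 above and is now stored with the byte-store opcodes.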
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}
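// Map an IR comparison predicate onto an ARM condition code. Predicates that
// would need two compares (FCMP_ONE, FCMP_UEQ) come back as AL, which the
// callers treat as "unhandled" and reject.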
static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
          .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DbgLoc);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
          .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DbgLoc);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DbgLoc);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
          .addReg(CmpReg)
          .addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DbgLoc);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc)).addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);

  return true;
}
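// Emit a compare of two values: CMP/CMN for integers (using an encodable
// immediate operand when possible) and VCMPE followed by FMSTAT for floats,
// leaving the result in CPSR for a later predicated instruction.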
bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(Ty, true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
                          (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  const MCInstrDesc &II = TII.get(CmpOpc);
  SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
  if (!UseImm) {
    SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::FMSTAT)));
  return true;
}
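// SelectCmp materializes the boolean result of a compare in a GPR: it emits
// the compare, materializes a zero, then uses a predicated MOVCC to
// overwrite the zero with 1 when the condition holds.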
bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
      .addReg(ZeroReg).addImm(1)
      .addImm(ARMPred).addReg(ARM::CPSR);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcEVT = TLI.getValueType(Src->getType(), true);
  if (!SrcEVT.isSimple())
    return false;
  MVT SrcVT = SrcEVT.getSimpleVT();
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg; the operand above was
  // an integer, so move it into an fp register first.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}
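// SelectFPToI below is the inverse of SelectIToFP: e.g. a fptosi of a double
// becomes VTOSIZD writing an f32-class scratch register, followed by a
// VMOVRS back to a GPR, because the VFP conversion instructions only write
// FP registers.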
bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}
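// For 'select i1 %c, i32 %a, i32 7', SelectSelect below emits roughly:
//   cmp   rC, #0
//   mov   rD, rA
//   moveq rD, #7
// i.e. a compare of the condition against zero plus a predicated move
// (MOVCC, with MVNCC used for negative immediates).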

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
                        (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  CondReg = constrainOperandRegClass(TII.get(CmpOpc), CondReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
          .addReg(CondReg)
          .addImm(0));

  unsigned MovCCOpc;
  const TargetRegisterClass *RC;
  if (!UseImm) {
    RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
    if (!isNegativeImm)
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    else
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm) {
    Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op2Reg)
        .addReg(Op1Reg)
        .addImm(ARMCC::NE)
        .addReg(ARM::CPSR);
  } else {
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op1Reg)
        .addImm(Imm)
        .addImm(ARMCC::EQ)
        .addReg(ARM::CPSR);
  }
  UpdateValueMap(I, ResultReg);
  return true;
}
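
// Conceptually, the register form of "select i1 %c, i32 %a, i32 %b" in ARM
// mode becomes something like (placeholder registers: r0=%c, r1=%a, r2=%b):
//   cmp   r0, #0            @ test the condition
//   mov   r3, r2            @ start with the false value
//   movne r3, r1            @ overwrite with the true value if %c != 0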

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support, we should have selected this
  // automagically. In case we have a real miss, go ahead and return false;
  // we'll pick it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
  SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT FPVT = TLI.getValueType(I->getType(), true);
  if (!FPVT.isSimple()) return false;
  MVT VT = FPVT.getSimpleVT();

  // We can get here when we want to use NEON for our fp operations but can't
  // figure out how to. Just use the VFP instructions if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}
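
// For example, with VFP2 available, "fadd double %a, %b" selects VADDD and
// "fadd float %a, %b" selects VADDS, i.e. a single vadd.f64 / vadd.f32; there
// is no NEON path here (see the FIXME above).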

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
                                           bool Return,
                                           bool isVarArg) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use the VFP variant of the calling
      // convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C:
    // Use the target triple & subtarget features to do the actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fall through to the soft-float variant; variadic functions don't use
    // the hard floating-point ABI.
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    if (Return)
      llvm_unreachable("Can't return in GHC call convention");
    else
      return CC_ARM_APCS_GHC;
  }
}
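
// As a worked example of the dispatch above: a non-variadic CallingConv::C
// call on an AAPCS target with VFP2 and a hard-float ABI resolves to
// CC_ARM_AAPCS_VFP (RetCC_ARM_AAPCS_VFP for the return); the same call marked
// variadic falls back to CC_ARM_AAPCS.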

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes,
                                  bool isVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Check whether we can handle the argument in its assigned location.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      continue;
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64 ||
          // TODO: Only handle register args for now.
          !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    } else {
      switch (ArgVT.SimpleTy) {
      default:
        return false;
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
        break;
      case MVT::f32:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      case MVT::f64:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      }
    }
  }

  // At this point, we are able to handle the call's arguments in fast isel.

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = Args[VA.getValNo()];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
        assert(Arg != 0 && "Failed to emit a sext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
      // Intentional fall-through. Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
        assert(Arg != 0 && "Failed to emit a zext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");

      CCValAssign &NextVA = ArgLocs[++i];

      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.

      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }

  return true;
}
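
// Note on the custom-lowering path above: an f64 argument assigned to a GPR
// pair is split with VMOVRRD, e.g. (placeholder registers):
//   vmov r0, r1, d0         @ move d0 into the two core registers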

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {
  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      MVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      MVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
                                                 F.isVarArg()));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVEVT = TLI.getValueType(RV->getType());
    if (!RVEVT.isSimple()) return false;
    MVT RVVT = RVEVT.getSimpleVT();
    MVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext. Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(RetOpc));
  AddOptionalDefs(MIB);
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);
  return true;
}
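
// Sketch of a typical return: for "ret i8 %v" under a zeroext attribute, the
// value is first zero-extended to i32 (ARM always extends to i32 here), the
// result is copied into the location register (normally r0), and BX_RET /
// tBX_RET is emitted with that register as an implicit use.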

unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? ARM::tBLXr : ARM::BLX;
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  // Manually compute the global's type to avoid building it when unnecessary.
  Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0);
  EVT LCREVT = TLI.getValueType(GVTy);
  if (!LCREVT.isSimple()) return 0;

  GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false,
                                       GlobalValue::ExternalLinkage, nullptr,
                                       Name);
  assert(GV->getType() == GVTy && "We miscomputed the type for the global!");
  return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
}
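
// When EnableARMLongCalls is set, getLibcallReg materializes the callee's
// address into a register (as an external global lowered by ARMMaterializeGV),
// and the call is then emitted as a BLX through that register instead of a
// direct BL to the symbol; see ARMSelectCallOp below.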

// A quick function that will emit a call for a named libcall, passing the
// operands of the Instruction in I as the call's arguments. We can assume
// that we can emit a call for any libcall we can produce. This is an abridged
// version of the full call infrastructure since we won't need to worry about
// things like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;

  unsigned CalleeReg = 0;
  if (EnableARMLongCalls) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DbgLoc, TII.get(CallOpc));
  // BL / BLX don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    AddDefaultPred(MIB);
  if (EnableARMLongCalls)
    MIB.addReg(CalleeReg);
  else
    MIB.addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
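
// For instance, on a subtarget without hardware divide, SelectDiv routes
// "sdiv i32" here with RTLIB::SDIV_I32; the emitted sequence is the usual
// call scaffolding: CALLSEQ_START, argument copies into r0/r1, a BL to the
// libcall name TLI provides, CALLSEQ_END, then a copy of the result out of r0.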

bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = nullptr) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Allow SelectionDAG isel to handle tail calls.
  if (CI->isTailCall()) return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  bool isVarArg = FTy->isVarArg();

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e - i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || EnableARMLongCalls) UseReg = true;

  unsigned CalleeReg = 0;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DbgLoc, TII.get(CallOpc));

  unsigned char OpFlags = 0;

  // Add MO_PLT for global address or external symbol in the PIC relocation
  // model.
  if (Subtarget->isTargetELF() && TM.getRelocationModel() == Reloc::PIC_)
    OpFlags = ARMII::MO_PLT;

  // ARM calls don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    AddDefaultPred(MIB);
  if (UseReg)
    MIB.addReg(CalleeReg);
  else if (!IntrMemName)
    MIB.addGlobalAddress(GV, 0, OpFlags);
  else
    MIB.addExternalSymbol(IntrMemName, OpFlags);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}
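
// E.g. an 8-byte, 4-aligned copy is emitted as two i32 load/store pairs; a
// 7-byte one becomes i32 + i16 + i8 pairs, walking the offsets forward. This
// is only worthwhile below the 16-byte cap checked above.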

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len, unsigned Alignment) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  while (Len) {
    MVT VT;
    if (!Alignment || Alignment >= 4) {
      if (Len >= 4)
        VT = MVT::i32;
      else if (Len >= 2)
        VT = MVT::i16;
      else {
        assert(Len == 1 && "Expected a length of 1!");
        VT = MVT::i8;
      }
    } else {
      // Bound based on alignment.
      if (Len >= 2 && Alignment == 2)
        VT = MVT::i16;
      else {
        VT = MVT::i8;
      }
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits() / 8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
    MFI->setFrameAddressIsTaken(true);

    unsigned LdrOpc;
    const TargetRegisterClass *RC;
    if (isThumb2) {
      LdrOpc = ARM::t2LDRi12;
      RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
    } else {
      LdrOpc = ARM::LDRi12;
      RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
    }

    const ARMBaseRegisterInfo *RegInfo =
        static_cast<const ARMBaseRegisterInfo *>(
            TM.getSubtargetImpl()->getRegisterInfo());
    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    unsigned SrcReg = FramePtr;

    // Recursively load the frame address:
    //   ldr r0 [fp]
    //   ldr r0 [r0]
    //   ldr r0 [r0]
    //   ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(LdrOpc), DestReg)
                      .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    UpdateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        unsigned Alignment = MTI.getAlignment();
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
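
  // When the inline expansion above doesn't apply, the intrinsic is lowered
  // as a plain call to memcpy/memmove via SelectCall; the IntrMemName path
  // skips the intrinsic's trailing alignment and volatile operands so only
  // (dest, src, len) are passed to the underlying function.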
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP));
    return true;
  }
  }
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
    return 0;

  // Table of which combinations can be emitted as a single instruction,
  // and which will require two.
  static const uint8_t isSingleInstrTbl[3][2][2][2] = {
    //            ARM                     Thumb
    //           !hasV6Ops  hasV6Ops     !hasV6Ops  hasV6Ops
    // ext:         s  z      s  z          s  z      s  z
    /*  1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
    /*  8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
    /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
  };

  // Target register constraints for the extension:
  //  - For ARM, never PC.
  //  - For 16-bit Thumb, restricted to the lower 8 registers.
  //  - For 32-bit Thumb, restricted to non-SP and non-PC.
  static const TargetRegisterClass *RCTbl[2][2] = {
    // Instructions: Two                     Single
    /* ARM      */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
    /* Thumb    */ { &ARM::tGPRRegClass,    &ARM::rGPRRegClass    }
  };

  // Table governing the instruction(s) to be emitted.
  static const struct InstructionTable {
    uint32_t Opc   : 16;
    uint32_t hasS  :  1; // Some instructions have an S bit, always set it to 0.
    uint32_t Shift :  7; // For shift operand addressing mode, used by MOVsi.
    uint32_t Imm   :  8; // All instructions have either a shift or a mask.
  } IT[2][2][3][2] = {
    { // Two instructions (first is left shift, second is in this table).
      { // ARM                Opc           S  Shift             Imm
        /*  1 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  31 },
        /*  1 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  31 } },
        /*  8 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  24 },
        /*  8 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  24 } },
        /* 16 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  16 },
        /* 16 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  16 } }
      },
      { // Thumb              Opc           S  Shift             Imm
        /*  1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  31 },
        /*  1 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  31 } },
        /*  8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  24 },
        /*  8 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  24 } },
        /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  16 },
        /* 16 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  16 } }
      }
    },
    { // Single instruction.
      { // ARM                Opc           S  Shift             Imm
        /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,   0 },
        /*  1 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift,   1 } },
        /*  8 bit sext */ { { ARM::SXTB   , 0, ARM_AM::no_shift,   0 },
        /*  8 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift, 255 } },
        /* 16 bit sext */ { { ARM::SXTH   , 0, ARM_AM::no_shift,   0 },
        /* 16 bit zext */   { ARM::UXTH   , 0, ARM_AM::no_shift,   0 } }
      },
      { // Thumb              Opc           S  Shift             Imm
        /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,   0 },
        /*  1 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift,   1 } },
        /*  8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift,   0 },
        /*  8 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } },
        /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift,   0 },
        /* 16 bit zext */   { ARM::t2UXTH , 0, ARM_AM::no_shift,   0 } }
      }
    }
  };

  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DestVT.getSizeInBits();
  (void) DestBits;
  assert((SrcBits < DestBits) && "can only extend to larger types");
  assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
         "other sizes unimplemented");
  assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
         "other sizes unimplemented");

  bool hasV6Ops = Subtarget->hasV6Ops();
  unsigned Bitness = SrcBits / 8;  // {1,8,16}=>{0,1,2}
  assert((Bitness < 3) && "sanity-check table bounds");

  bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
  const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
  const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
  unsigned Opc = ITP->Opc;
  assert(ARM::KILL != Opc && "Invalid table entry");
  unsigned hasS = ITP->hasS;
  ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift;
  assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
         "only MOVsi has shift operand addressing mode");
  unsigned Imm = ITP->Imm;

  // 16-bit Thumb instructions always set CPSR (unless they're in an IT block).
  bool setsCPSR = &ARM::tGPRRegClass == RC;
  unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
  unsigned ResultReg;
  // MOVsi encodes shift and immediate in shift operand addressing mode.
  // The following condition has the same value whether one or two
  // instructions are emitted: in the two-instruction case both are shifts.
  bool ImmIsSO = (Shift != ARM_AM::no_shift);

  // Either one or two instructions are emitted.
  // They're always of the form:
  //   dst = in OP imm
  // CPSR is set only by 16-bit Thumb instructions.
  // The predicate, if any, is AL.
  // The S bit, if available, is always 0.
  // When two are emitted, the first's result feeds the second's input;
  // that intermediate value is then dead.
  unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
  for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
    ResultReg = createResultReg(RC);
    bool isLsl = (0 == Instr) && !isSingleInstr;
    unsigned Opcode = isLsl ? LSLOpc : Opc;
    ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift;
    unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
    bool isKill = 1 == Instr;
    MachineInstrBuilder MIB = BuildMI(
        *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg);
    if (setsCPSR)
      MIB.addReg(ARM::CPSR, RegState::Define);
    SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
    AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(ImmEnc));
    if (hasS)
      AddDefaultCC(MIB);
    // The second instruction consumes the first's result.
    SrcReg = ResultReg;
  }

  return ResultReg;
}
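
// Example of the two-instruction path: on pre-v6 ARM there is no sxtb, so
// "sext i8 -> i32" is emitted as a shift pair (placeholder registers):
//   lsl r0, r0, #24         @ MOVsi, left shift by 32-8
//   asr r0, r0, #24         @ MOVsi, arithmetic shift back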

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(SrcTy, true);
  DestEVT = TLI.getValueType(DestTy, true);
  if (!SrcEVT.isSimple()) return false;
  if (!DestEVT.isSimple()) return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}
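
// For the constant-shift case below, "shl i32 %a, 3" selects MOVsi with an
// lsl #3 shift-operand encoding (printed as "lsl r0, r1, #3" in UAL); a
// variable amount keeps MOVsr and reads the shift count from a register.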

bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
  // Thumb2 mode is handled by the target-independent selector or by
  // SelectionDAG ISel.
  if (isThumb2)
    return false;

  // Only handle i32 now.
  EVT DestVT = TLI.getValueType(I->getType(), true);
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
  unsigned ShiftImm;
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    // Fall back to SelectionDAG isel if the shift amount is zero or at least
    // the width of the value type.
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;

    Opc = ARM::MOVsi;
  }

  Value *Src1Value = I->getOperand(0);
  unsigned Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  unsigned Reg2 = 0;
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  if (ResultReg == 0) return false;

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg)
                            .addReg(Reg1);

  if (Opc == ARM::MOVsi)
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
  else if (Opc == ARM::MOVsr) {
    MIB.addReg(Reg2);
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
  }

  AddOptionalDefs(MIB);
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    case Instruction::Shl:
      return SelectShift(I, ARM_AM::lsl);
    case Instruction::LShr:
      return SelectShift(I, ARM_AM::lsr);
    case Instruction::AShr:
      return SelectShift(I, ARM_AM::asr);
    default: break;
  }
  return false;
}

namespace {
// This table describes sign- and zero-extend instructions which can be
// folded into a preceding load. All of these extends have an immediate
// (sometimes a mask and sometimes a shift) that's applied after
// extension.
const struct FoldableLoadExtendsStruct {
  uint16_t Opc[2];  // ARM, Thumb.
  uint8_t ExpectedImm;
  uint8_t isZExt     : 1;
  uint8_t ExpectedVT : 7;
} FoldableLoadExtends[] = {
  { { ARM::SXTH,  ARM::t2SXTH  },   0, 0, MVT::i16 },
  { { ARM::UXTH,  ARM::t2UXTH  },   0, 1, MVT::i16 },
  { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8  },
  { { ARM::SXTB,  ARM::t2SXTB  },   0, 0, MVT::i8  },
  { { ARM::UXTB,  ARM::t2UXTB  },   0, 1, MVT::i8  }
};
}

/// \brief The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  //   ldrb r1, [r0]       ldrb r1, [r0]
  //   uxtb r2, r1     =>
  //   mov  r3, r2         mov  r3, r1
  if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
    return false;
  const uint64_t Imm = MI->getOperand(2).getImm();

  bool Found = false;
  bool isZExt;
  for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends);
       i != e; ++i) {
    if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() &&
        (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm &&
        MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) {
      Found = true;
      isZExt = FoldableLoadExtends[i].isZExt;
    }
  }
  if (!Found) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}
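
// Sketch of ARMLowerPICELF below: for a local-linkage global in PIC code it
// loads a GOTOFF entry from the constant pool and adds the global base, e.g.
// (placeholder registers and label):
//   ldr r0, .LCPI0_0        @ GOTOFF offset of the global
//   add r0, r0, rGlobalBase
// Non-GOTOFF globals instead load the address through the GOT (LDRrs/t2LDRs).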

unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
                                     unsigned Align, MVT VT) {
  bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
  ARMConstantPoolConstant *CPV =
    ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  unsigned Opc;
  unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT));
  // Load the value.
  if (isThumb2) {
    DestReg1 = constrainOperandRegClass(TII.get(ARM::t2LDRpci), DestReg1, 0);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::t2LDRpci), DestReg1)
                    .addConstantPoolIndex(Idx));
    Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs;
  } else {
    // The extra immediate is for addrmode2.
    DestReg1 = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg1, 0);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                            DbgLoc, TII.get(ARM::LDRcp), DestReg1)
                    .addConstantPoolIndex(Idx).addImm(0));
    Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs;
  }

  unsigned GlobalBaseReg = AFI->getGlobalBaseReg();
  if (GlobalBaseReg == 0) {
    GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT));
    AFI->setGlobalBaseReg(GlobalBaseReg);
  }

  unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT));
  DestReg2 = constrainOperandRegClass(TII.get(Opc), DestReg2, 0);
  DestReg1 = constrainOperandRegClass(TII.get(Opc), DestReg1, 1);
  GlobalBaseReg = constrainOperandRegClass(TII.get(Opc), GlobalBaseReg, 2);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DbgLoc, TII.get(Opc), DestReg2)
                            .addReg(DestReg1)
                            .addReg(GlobalBaseReg);
  if (!UseGOTOFF)
    MIB.addImm(0);
  AddOptionalDefs(MIB);

  return DestReg2;
}

bool ARMFastISel::FastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    return false;

  const Function *F = FuncInfo.Fn;
  if (F->isVarArg())
    return false;

  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  default:
    return false;
  case CallingConv::Fast:
  case CallingConv::C:
  case CallingConv::ARM_AAPCS_VFP:
  case CallingConv::ARM_AAPCS:
  case CallingConv::ARM_APCS:
    break;
  }

  // Only handle simple cases, i.e. up to 4 i8/i16/i32 scalar arguments, which
  // are passed in r0-r3.
  unsigned Idx = 1;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I, ++Idx) {
    if (Idx > 4)
      return false;

    if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
        F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
        F->getAttributes().hasAttribute(Idx, Attribute::ByVal))
      return false;

    Type *ArgTy = I->getType();
    if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
      return false;

    EVT ArgVT = TLI.getValueType(ArgTy);
    if (!ArgVT.isSimple()) return false;
    switch (ArgVT.getSimpleVT().SimpleTy) {
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      break;
    default:
      return false;
    }
  }

  static const uint16_t GPRArgRegs[] = {
    ARM::R0, ARM::R1, ARM::R2, ARM::R3
  };

  const TargetRegisterClass *RC = &ARM::rGPRRegClass;
  Idx = 0;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I, ++Idx) {
    unsigned SrcReg = GPRArgRegs[Idx];
    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
    // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
    // Without this, EmitLiveInCopies may eliminate the livein if its only
    // use is a bitcast (which isn't turned into an instruction).
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(DstReg, getKillRegState(true));
    UpdateValueMap(I, ResultReg);
  }

  return true;
}
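
// E.g. for "define i32 @f(i32 %a, i32 %b)", FastLowerArguments marks r0 and
// r1 as live-ins and copies each into a fresh virtual register, so later uses
// of %a and %b read the vregs rather than the raw physregs.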

namespace llvm {
  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {
    const TargetMachine &TM = funcInfo.MF->getTarget();

    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
    bool UseFastISel = false;
    UseFastISel |= Subtarget->isTargetMachO() && !Subtarget->isThumb1Only();
    UseFastISel |= Subtarget->isTargetLinux() && !Subtarget->isThumb();
    UseFastISel |= Subtarget->isTargetNaCl() && !Subtarget->isThumb();

    if (UseFastISel) {
      // iOS always has a FP for backtracking; force other targets to keep
      // their FP when doing FastISel. The emitted code is currently superior,
      // and in cases like test-suite's lencod, FastISel isn't quite correct
      // when the FP is eliminated.
      TM.Options.NoFramePointerElim = true;
      return new ARMFastISel(funcInfo, libInfo);
    }
    return nullptr;
  }
}