//===-- ARMFastISel.cpp - ARM FastISel implementation --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
      : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel final : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  Module &M;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
        : FastISel(funcInfo, libInfo),
          Subtarget(
              &static_cast<const ARMSubtarget &>(funcInfo.MF->getSubtarget())),
          M(const_cast<Module &>(*funcInfo.Fn->getParent())),
          TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
          TLI(*Subtarget->getTargetLowering()) {
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
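    // These emitters mirror the generic FastISel versions so that the ARM
    // predicate and optional CC operands can be appended (via AddOptionalDefs
    // below) to every instruction we create.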
  private:
    unsigned fastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);
    unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              uint64_t Imm);
    unsigned fastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);

    // Backend specific FastISel code.
  private:
    bool fastSelectInstruction(const Instruction *I) override;
    unsigned fastMaterializeConstant(const Constant *C) override;
    unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
    bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                             const LoadInst *LI) override;
    bool fastLowerArguments() override;
  private:
#include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.
  private:
    bool isPositionIndependent() const;
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                               unsigned Alignment);
    unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
    unsigned ARMMaterializeInt(const Constant *C, MVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
    unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);
    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);

    const TargetLowering *getTargetLowering() { return &TLI; }

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(MVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              MachineMemOperand::Flags Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we'll be handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return MI->isPredicable();

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyways.
  if (isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addReg(Op0, Op0IsKill * RegState::Kill)
                        .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addReg(Op0, Op0IsKill * RegState::Kill)
                        .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addReg(Op0, Op0IsKill * RegState::Kill)
                        .addReg(Op1, Op1IsKill * RegState::Kill)
                        .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
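// ARMMoveToFPReg / ARMMoveToIntReg copy a 32-bit value between the core
// registers and the VFP single-precision registers using VMOVSR / VMOVRS.
// 64-bit values are rejected here (return 0) and handled by the callers.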
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVSR), MoveReg)
                      .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVRS), MoveReg)
                      .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = DL.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
                                               &ARM::GPRRegClass;
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ImmReg)
                        .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      const TargetRegisterClass *RC = isThumb2 ?
                                                 &ARM::rGPRRegClass :
                                                 &ARM::GPRRegClass;
      unsigned ImmReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), ImmReg)
                          .addImm(Imm));
      return ImmReg;
    }
  }

  unsigned ResultReg = 0;
  if (Subtarget->useMovt(*FuncInfo.MF))
    ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());

  if (ResultReg)
    return ResultReg;

  // Load from constant pool. For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = DL.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
  ResultReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::t2LDRpci), ResultReg)
                        .addConstantPoolIndex(Idx));
  else {
    // The extra immediate is for addrmode2.
    ResultReg = constrainOperandRegClass(TII.get(ARM::LDRcp), ResultReg, 0);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::LDRcp), ResultReg)
                        .addConstantPoolIndex(Idx)
                        .addImm(0));
  }
  return ResultReg;
}

bool ARMFastISel::isPositionIndependent() const {
  return TLI.isPositionIndependent();
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32 || GV->isThreadLocal()) return 0;

  // ROPI/RWPI not currently supported.
  if (Subtarget->isROPI() || Subtarget->isRWPI())
    return 0;

  bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);

  // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;

  bool IsPositionIndependent = isPositionIndependent();
  // Use movw+movt when possible, it avoids constant pool entries.
  // Non-darwin targets only support static movt relocations in FastISel.
  if (Subtarget->useMovt(*FuncInfo.MF) &&
      (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
    unsigned Opc;
    unsigned char TF = 0;
    if (Subtarget->isTargetMachO())
      TF = ARMII::MO_NONLAZY;

    if (IsPositionIndependent)
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
    else
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = DL.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = DL.getTypeAllocSize(GV->getType());
    }

    if (Subtarget->isTargetELF() && IsPositionIndependent)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab index.
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                    DestReg).addConstantPoolIndex(Idx);
      if (IsPositionIndependent)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRcp), DestReg)
                .addConstantPoolIndex(Idx)
                .addImm(0);
      AddOptionalDefs(MIB);

      if (IsPositionIndependent) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));

        MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                          DbgLoc, TII.get(Opc), NewDestReg)
                                      .addReg(DestReg)
                                      .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if (IsIndirect) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::t2LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                        .addFrameIndex(SI->second)
                        .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
          TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = DL.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (canFoldAddIntoGEP(U, Op)) {
              // A compatible add with a constant operand. Fold the constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

    unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                        .addFrameIndex(Addr.Base.FI)
                        .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       MachineMemOperand::Flags Flags,
                                       bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags,
        MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load. Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::VMOVSR), MoveReg)
                        .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
                                              : &ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), Res)
                          .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                TII.get(ARM::VMOVRS), MoveReg)
                            .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(StrOpc))
                                .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
          .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(TstOpc))
                          .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
          .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    fastEmitBranch(Target, DbgLoc);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
          .addReg(CmpReg)
          .addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  finishCondBranch(BI->getParent(), TBB, FBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc)).addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (const BasicBlock *SuccBB : IB->successors())
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]);

  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(DL, Ty, true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ?
        (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
      // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  const MCInstrDesc &II = TII.get(CmpOpc);
  SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
  if (!UseImm) {
    SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
              .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = fastMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
      .addReg(ZeroReg).addImm(1)
      .addImm(ARMPred).addReg(ARM::CPSR);

  updateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTDS), Result)
                      .addReg(Op));
  updateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTSD), Result)
                      .addReg(Op));
  updateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);
  if (!SrcEVT.isSimple())
    return false;
  MVT SrcVT = SrcEVT.getSimpleVT();
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(FP));
  updateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  updateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
          .addReg(CondReg)
          .addImm(1));

  unsigned MovCCOpc;
  const TargetRegisterClass *RC;
  if (!UseImm) {
    RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
    if (!isNegativeImm)
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    else
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm) {
    Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op2Reg)
        .addReg(Op1Reg)
        .addImm(ARMCC::NE)
        .addReg(ARM::CPSR);
  } else {
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op1Reg)
        .addImm(Imm)
        .addImm(ARMCC::EQ)
        .addReg(ARM::CPSR);
  }
  updateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
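  // The RTLIB entry is picked by operand width and signedness; ARMEmitLibcall
  // then materializes the actual call to the runtime routine.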
1703   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1704   if (VT == MVT::i8)
1705     LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
1706   else if (VT == MVT::i16)
1707     LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
1708   else if (VT == MVT::i32)
1709     LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
1710   else if (VT == MVT::i64)
1711     LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1712   else if (VT == MVT::i128)
1713     LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1714   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
1715
1716   return ARMEmitLibcall(I, LC);
1717 }
1718
1719 bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
1720   MVT VT;
1721   Type *Ty = I->getType();
1722   if (!isTypeLegal(Ty, VT))
1723     return false;
1724
1725   // Many ABIs do not provide a libcall for standalone remainder, so we need to
1726   // use divrem (see the RTABI 4.3.1). Since FastISel can't handle non-double
1727   // multi-reg returns, we'll have to bail out.
1728   if (!TLI.hasStandaloneRem(VT)) {
1729     return false;
1730   }
1731
1732   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1733   if (VT == MVT::i8)
1734     LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1735   else if (VT == MVT::i16)
1736     LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1737   else if (VT == MVT::i32)
1738     LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1739   else if (VT == MVT::i64)
1740     LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1741   else if (VT == MVT::i128)
1742     LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1743   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
1744
1745   return ARMEmitLibcall(I, LC);
1746 }
1747
1748 bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
1749   EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1750
1751   // We can get here in the case when we have a binary operation on a non-legal
1752   // type and the target independent selector doesn't know how to handle it.
1753   if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1754     return false;
1755
1756   unsigned Opc;
1757   switch (ISDOpcode) {
1758     default: return false;
1759     case ISD::ADD:
1760       Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1761       break;
1762     case ISD::OR:
1763       Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1764       break;
1765     case ISD::SUB:
1766       Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1767       break;
1768   }
1769
1770   unsigned SrcReg1 = getRegForValue(I->getOperand(0));
1771   if (SrcReg1 == 0) return false;
1772
1773   // TODO: Often the 2nd operand is an immediate, which can be encoded directly
1774   // in the instruction, rather than materializing the value in a register.
1775   unsigned SrcReg2 = getRegForValue(I->getOperand(1));
1776   if (SrcReg2 == 0) return false;
1777
1778   unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
1779   SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
1780   SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
1781   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1782                           TII.get(Opc), ResultReg)
1783                   .addReg(SrcReg1).addReg(SrcReg2));
1784   updateValueMap(I, ResultReg);
1785   return true;
1786 }
1787
1788 bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
1789   EVT FPVT = TLI.getValueType(DL, I->getType(), true);
1790   if (!FPVT.isSimple()) return false;
1791   MVT VT = FPVT.getSimpleVT();
1792
1793   // FIXME: Support vector types where possible.
1794 if (VT.isVector()) 1795 return false; 1796 1797 // We can get here in the case when we want to use NEON for our fp 1798 // operations, but can't figure out how to. Just use the vfp instructions 1799 // if we have them. 1800 // FIXME: It'd be nice to use NEON instructions. 1801 Type *Ty = I->getType(); 1802 bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy()); 1803 if (isFloat && !Subtarget->hasVFP2()) 1804 return false; 1805 1806 unsigned Opc; 1807 bool is64bit = VT == MVT::f64 || VT == MVT::i64; 1808 switch (ISDOpcode) { 1809 default: return false; 1810 case ISD::FADD: 1811 Opc = is64bit ? ARM::VADDD : ARM::VADDS; 1812 break; 1813 case ISD::FSUB: 1814 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS; 1815 break; 1816 case ISD::FMUL: 1817 Opc = is64bit ? ARM::VMULD : ARM::VMULS; 1818 break; 1819 } 1820 unsigned Op1 = getRegForValue(I->getOperand(0)); 1821 if (Op1 == 0) return false; 1822 1823 unsigned Op2 = getRegForValue(I->getOperand(1)); 1824 if (Op2 == 0) return false; 1825 1826 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy)); 1827 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1828 TII.get(Opc), ResultReg) 1829 .addReg(Op1).addReg(Op2)); 1830 updateValueMap(I, ResultReg); 1831 return true; 1832 } 1833 1834 // Call Handling Code 1835 1836 // This is largely taken directly from CCAssignFnForNode 1837 // TODO: We may not support all of this. 1838 CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, 1839 bool Return, 1840 bool isVarArg) { 1841 switch (CC) { 1842 default: 1843 llvm_unreachable("Unsupported calling convention"); 1844 case CallingConv::Fast: 1845 if (Subtarget->hasVFP2() && !isVarArg) { 1846 if (!Subtarget->isAAPCS_ABI()) 1847 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1848 // For AAPCS ABI targets, just use VFP variant of the calling convention. 1849 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1850 } 1851 // Fallthrough 1852 case CallingConv::C: 1853 case CallingConv::CXX_FAST_TLS: 1854 // Use target triple & subtarget features to do actual dispatch. 1855 if (Subtarget->isAAPCS_ABI()) { 1856 if (Subtarget->hasVFP2() && 1857 TM.Options.FloatABIType == FloatABI::Hard && !isVarArg) 1858 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1859 else 1860 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1861 } else { 1862 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1863 } 1864 case CallingConv::ARM_AAPCS_VFP: 1865 case CallingConv::Swift: 1866 if (!isVarArg) 1867 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1868 // Fall through to soft float variant, variadic functions don't 1869 // use hard floating point ABI. 1870 case CallingConv::ARM_AAPCS: 1871 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1872 case CallingConv::ARM_APCS: 1873 return (Return ? 
RetCC_ARM_APCS: CC_ARM_APCS); 1874 case CallingConv::GHC: 1875 if (Return) 1876 llvm_unreachable("Can't return in GHC call convention"); 1877 else 1878 return CC_ARM_APCS_GHC; 1879 } 1880 } 1881 1882 bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, 1883 SmallVectorImpl<unsigned> &ArgRegs, 1884 SmallVectorImpl<MVT> &ArgVTs, 1885 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 1886 SmallVectorImpl<unsigned> &RegArgs, 1887 CallingConv::ID CC, 1888 unsigned &NumBytes, 1889 bool isVarArg) { 1890 SmallVector<CCValAssign, 16> ArgLocs; 1891 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context); 1892 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, 1893 CCAssignFnForCall(CC, false, isVarArg)); 1894 1895 // Check that we can handle all of the arguments. If we can't, then bail out 1896 // now before we add code to the MBB. 1897 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1898 CCValAssign &VA = ArgLocs[i]; 1899 MVT ArgVT = ArgVTs[VA.getValNo()]; 1900 1901 // We don't handle NEON/vector parameters yet. 1902 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64) 1903 return false; 1904 1905 // Now copy/store arg to correct locations. 1906 if (VA.isRegLoc() && !VA.needsCustom()) { 1907 continue; 1908 } else if (VA.needsCustom()) { 1909 // TODO: We need custom lowering for vector (v2f64) args. 1910 if (VA.getLocVT() != MVT::f64 || 1911 // TODO: Only handle register args for now. 1912 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc()) 1913 return false; 1914 } else { 1915 switch (ArgVT.SimpleTy) { 1916 default: 1917 return false; 1918 case MVT::i1: 1919 case MVT::i8: 1920 case MVT::i16: 1921 case MVT::i32: 1922 break; 1923 case MVT::f32: 1924 if (!Subtarget->hasVFP2()) 1925 return false; 1926 break; 1927 case MVT::f64: 1928 if (!Subtarget->hasVFP2()) 1929 return false; 1930 break; 1931 } 1932 } 1933 } 1934 1935 // At the point, we are able to handle the call's arguments in fast isel. 1936 1937 // Get a count of how many bytes are to be pushed on the stack. 1938 NumBytes = CCInfo.getNextStackOffset(); 1939 1940 // Issue CALLSEQ_START 1941 unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); 1942 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1943 TII.get(AdjStackDown)) 1944 .addImm(NumBytes)); 1945 1946 // Process the args. 1947 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1948 CCValAssign &VA = ArgLocs[i]; 1949 const Value *ArgVal = Args[VA.getValNo()]; 1950 unsigned Arg = ArgRegs[VA.getValNo()]; 1951 MVT ArgVT = ArgVTs[VA.getValNo()]; 1952 1953 assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) && 1954 "We don't handle NEON/vector parameters yet."); 1955 1956 // Handle arg promotion, etc. 1957 switch (VA.getLocInfo()) { 1958 case CCValAssign::Full: break; 1959 case CCValAssign::SExt: { 1960 MVT DestVT = VA.getLocVT(); 1961 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false); 1962 assert (Arg != 0 && "Failed to emit a sext"); 1963 ArgVT = DestVT; 1964 break; 1965 } 1966 case CCValAssign::AExt: 1967 // Intentional fall-through. Handle AExt and ZExt. 
1968 case CCValAssign::ZExt: { 1969 MVT DestVT = VA.getLocVT(); 1970 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true); 1971 assert (Arg != 0 && "Failed to emit a zext"); 1972 ArgVT = DestVT; 1973 break; 1974 } 1975 case CCValAssign::BCvt: { 1976 unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg, 1977 /*TODO: Kill=*/false); 1978 assert(BC != 0 && "Failed to emit a bitcast!"); 1979 Arg = BC; 1980 ArgVT = VA.getLocVT(); 1981 break; 1982 } 1983 default: llvm_unreachable("Unknown arg promotion!"); 1984 } 1985 1986 // Now copy/store arg to correct locations. 1987 if (VA.isRegLoc() && !VA.needsCustom()) { 1988 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1989 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg); 1990 RegArgs.push_back(VA.getLocReg()); 1991 } else if (VA.needsCustom()) { 1992 // TODO: We need custom lowering for vector (v2f64) args. 1993 assert(VA.getLocVT() == MVT::f64 && 1994 "Custom lowering for v2f64 args not available"); 1995 1996 CCValAssign &NextVA = ArgLocs[++i]; 1997 1998 assert(VA.isRegLoc() && NextVA.isRegLoc() && 1999 "We only handle register args!"); 2000 2001 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2002 TII.get(ARM::VMOVRRD), VA.getLocReg()) 2003 .addReg(NextVA.getLocReg(), RegState::Define) 2004 .addReg(Arg)); 2005 RegArgs.push_back(VA.getLocReg()); 2006 RegArgs.push_back(NextVA.getLocReg()); 2007 } else { 2008 assert(VA.isMemLoc()); 2009 // Need to store on the stack. 2010 2011 // Don't emit stores for undef values. 2012 if (isa<UndefValue>(ArgVal)) 2013 continue; 2014 2015 Address Addr; 2016 Addr.BaseType = Address::RegBase; 2017 Addr.Base.Reg = ARM::SP; 2018 Addr.Offset = VA.getLocMemOffset(); 2019 2020 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet; 2021 assert(EmitRet && "Could not emit a store for argument!"); 2022 } 2023 } 2024 2025 return true; 2026 } 2027 2028 bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 2029 const Instruction *I, CallingConv::ID CC, 2030 unsigned &NumBytes, bool isVarArg) { 2031 // Issue CALLSEQ_END 2032 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 2033 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2034 TII.get(AdjStackUp)) 2035 .addImm(NumBytes).addImm(0)); 2036 2037 // Now the return value. 2038 if (RetVT != MVT::isVoid) { 2039 SmallVector<CCValAssign, 16> RVLocs; 2040 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context); 2041 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2042 2043 // Copy all of the result registers out of their specified physreg. 2044 if (RVLocs.size() == 2 && RetVT == MVT::f64) { 2045 // For this move we copy into two registers and then move into the 2046 // double fp reg we want. 2047 MVT DestVT = RVLocs[0].getValVT(); 2048 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); 2049 unsigned ResultReg = createResultReg(DstRC); 2050 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2051 TII.get(ARM::VMOVDRR), ResultReg) 2052 .addReg(RVLocs[0].getLocReg()) 2053 .addReg(RVLocs[1].getLocReg())); 2054 2055 UsedRegs.push_back(RVLocs[0].getLocReg()); 2056 UsedRegs.push_back(RVLocs[1].getLocReg()); 2057 2058 // Finally update the result. 2059 updateValueMap(I, ResultReg); 2060 } else { 2061 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!"); 2062 MVT CopyVT = RVLocs[0].getValVT(); 2063 2064 // Special handling for extended integers. 
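      // Sub-word integer results (i1/i8/i16) come back widened in a full
      // 32-bit register, so copy them out as i32.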
2065 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16) 2066 CopyVT = MVT::i32; 2067 2068 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); 2069 2070 unsigned ResultReg = createResultReg(DstRC); 2071 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2072 TII.get(TargetOpcode::COPY), 2073 ResultReg).addReg(RVLocs[0].getLocReg()); 2074 UsedRegs.push_back(RVLocs[0].getLocReg()); 2075 2076 // Finally update the result. 2077 updateValueMap(I, ResultReg); 2078 } 2079 } 2080 2081 return true; 2082 } 2083 2084 bool ARMFastISel::SelectRet(const Instruction *I) { 2085 const ReturnInst *Ret = cast<ReturnInst>(I); 2086 const Function &F = *I->getParent()->getParent(); 2087 2088 if (!FuncInfo.CanLowerReturn) 2089 return false; 2090 2091 if (TLI.supportSwiftError() && 2092 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) 2093 return false; 2094 2095 if (TLI.supportSplitCSR(FuncInfo.MF)) 2096 return false; 2097 2098 // Build a list of return value registers. 2099 SmallVector<unsigned, 4> RetRegs; 2100 2101 CallingConv::ID CC = F.getCallingConv(); 2102 if (Ret->getNumOperands() > 0) { 2103 SmallVector<ISD::OutputArg, 4> Outs; 2104 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL); 2105 2106 // Analyze operands of the call, assigning locations to each operand. 2107 SmallVector<CCValAssign, 16> ValLocs; 2108 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext()); 2109 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */, 2110 F.isVarArg())); 2111 2112 const Value *RV = Ret->getOperand(0); 2113 unsigned Reg = getRegForValue(RV); 2114 if (Reg == 0) 2115 return false; 2116 2117 // Only handle a single return value for now. 2118 if (ValLocs.size() != 1) 2119 return false; 2120 2121 CCValAssign &VA = ValLocs[0]; 2122 2123 // Don't bother handling odd stuff for now. 2124 if (VA.getLocInfo() != CCValAssign::Full) 2125 return false; 2126 // Only handle register returns for now. 2127 if (!VA.isRegLoc()) 2128 return false; 2129 2130 unsigned SrcReg = Reg + VA.getValNo(); 2131 EVT RVEVT = TLI.getValueType(DL, RV->getType()); 2132 if (!RVEVT.isSimple()) return false; 2133 MVT RVVT = RVEVT.getSimpleVT(); 2134 MVT DestVT = VA.getValVT(); 2135 // Special handling for extended integers. 2136 if (RVVT != DestVT) { 2137 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) 2138 return false; 2139 2140 assert(DestVT == MVT::i32 && "ARM should always ext to i32"); 2141 2142 // Perform extension if flagged as either zext or sext. Otherwise, do 2143 // nothing. 2144 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) { 2145 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt()); 2146 if (SrcReg == 0) return false; 2147 } 2148 } 2149 2150 // Make the copy. 2151 unsigned DstReg = VA.getLocReg(); 2152 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); 2153 // Avoid a cross-class copy. This is very unlikely. 2154 if (!SrcRC->contains(DstReg)) 2155 return false; 2156 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2157 TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg); 2158 2159 // Add register to return instruction. 2160 RetRegs.push_back(VA.getLocReg()); 2161 } 2162 2163 unsigned RetOpc = isThumb2 ? 
ARM::tBX_RET : ARM::BX_RET; 2164 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2165 TII.get(RetOpc)); 2166 AddOptionalDefs(MIB); 2167 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) 2168 MIB.addReg(RetRegs[i], RegState::Implicit); 2169 return true; 2170 } 2171 2172 unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) { 2173 if (UseReg) 2174 return isThumb2 ? ARM::tBLXr : ARM::BLX; 2175 else 2176 return isThumb2 ? ARM::tBL : ARM::BL; 2177 } 2178 2179 unsigned ARMFastISel::getLibcallReg(const Twine &Name) { 2180 // Manually compute the global's type to avoid building it when unnecessary. 2181 Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0); 2182 EVT LCREVT = TLI.getValueType(DL, GVTy); 2183 if (!LCREVT.isSimple()) return 0; 2184 2185 GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false, 2186 GlobalValue::ExternalLinkage, nullptr, 2187 Name); 2188 assert(GV->getType() == GVTy && "We miscomputed the type for the global!"); 2189 return ARMMaterializeGV(GV, LCREVT.getSimpleVT()); 2190 } 2191 2192 // A quick function that will emit a call for a named libcall in F with the 2193 // vector of passed arguments for the Instruction in I. We can assume that we 2194 // can emit a call for any libcall we can produce. This is an abridged version 2195 // of the full call infrastructure since we won't need to worry about things 2196 // like computed function pointers or strange arguments at call sites. 2197 // TODO: Try to unify this and the normal call bits for ARM, then try to unify 2198 // with X86. 2199 bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { 2200 CallingConv::ID CC = TLI.getLibcallCallingConv(Call); 2201 2202 // Handle *simple* calls for now. 2203 Type *RetTy = I->getType(); 2204 MVT RetVT; 2205 if (RetTy->isVoidTy()) 2206 RetVT = MVT::isVoid; 2207 else if (!isTypeLegal(RetTy, RetVT)) 2208 return false; 2209 2210 // Can't handle non-double multi-reg retvals. 2211 if (RetVT != MVT::isVoid && RetVT != MVT::i32) { 2212 SmallVector<CCValAssign, 16> RVLocs; 2213 CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context); 2214 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false)); 2215 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2216 return false; 2217 } 2218 2219 // Set up the argument vectors. 2220 SmallVector<Value*, 8> Args; 2221 SmallVector<unsigned, 8> ArgRegs; 2222 SmallVector<MVT, 8> ArgVTs; 2223 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2224 Args.reserve(I->getNumOperands()); 2225 ArgRegs.reserve(I->getNumOperands()); 2226 ArgVTs.reserve(I->getNumOperands()); 2227 ArgFlags.reserve(I->getNumOperands()); 2228 for (unsigned i = 0; i < I->getNumOperands(); ++i) { 2229 Value *Op = I->getOperand(i); 2230 unsigned Arg = getRegForValue(Op); 2231 if (Arg == 0) return false; 2232 2233 Type *ArgTy = Op->getType(); 2234 MVT ArgVT; 2235 if (!isTypeLegal(ArgTy, ArgVT)) return false; 2236 2237 ISD::ArgFlagsTy Flags; 2238 unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); 2239 Flags.setOrigAlign(OriginalAlignment); 2240 2241 Args.push_back(Op); 2242 ArgRegs.push_back(Arg); 2243 ArgVTs.push_back(ArgVT); 2244 ArgFlags.push_back(Flags); 2245 } 2246 2247 // Handle the arguments now that we've gotten them. 
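  // ProcessCallArgs assigns each argument a location, copies it into its
  // argument register or stack slot, and reports the required stack
  // adjustment in NumBytes.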
2248 SmallVector<unsigned, 4> RegArgs; 2249 unsigned NumBytes; 2250 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2251 RegArgs, CC, NumBytes, false)) 2252 return false; 2253 2254 unsigned CalleeReg = 0; 2255 if (Subtarget->genLongCalls()) { 2256 CalleeReg = getLibcallReg(TLI.getLibcallName(Call)); 2257 if (CalleeReg == 0) return false; 2258 } 2259 2260 // Issue the call. 2261 unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls()); 2262 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2263 DbgLoc, TII.get(CallOpc)); 2264 // BL / BLX don't take a predicate, but tBL / tBLX do. 2265 if (isThumb2) 2266 AddDefaultPred(MIB); 2267 if (Subtarget->genLongCalls()) 2268 MIB.addReg(CalleeReg); 2269 else 2270 MIB.addExternalSymbol(TLI.getLibcallName(Call)); 2271 2272 // Add implicit physical register uses to the call. 2273 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2274 MIB.addReg(RegArgs[i], RegState::Implicit); 2275 2276 // Add a register mask with the call-preserved registers. 2277 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2278 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC)); 2279 2280 // Finish off the call including any return values. 2281 SmallVector<unsigned, 4> UsedRegs; 2282 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false; 2283 2284 // Set all unused physreg defs as dead. 2285 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2286 2287 return true; 2288 } 2289 2290 bool ARMFastISel::SelectCall(const Instruction *I, 2291 const char *IntrMemName = nullptr) { 2292 const CallInst *CI = cast<CallInst>(I); 2293 const Value *Callee = CI->getCalledValue(); 2294 2295 // Can't handle inline asm. 2296 if (isa<InlineAsm>(Callee)) return false; 2297 2298 // Allow SelectionDAG isel to handle tail calls. 2299 if (CI->isTailCall()) return false; 2300 2301 // Check the calling convention. 2302 ImmutableCallSite CS(CI); 2303 CallingConv::ID CC = CS.getCallingConv(); 2304 2305 // TODO: Avoid some calling conventions? 2306 2307 FunctionType *FTy = CS.getFunctionType(); 2308 bool isVarArg = FTy->isVarArg(); 2309 2310 // Handle *simple* calls for now. 2311 Type *RetTy = I->getType(); 2312 MVT RetVT; 2313 if (RetTy->isVoidTy()) 2314 RetVT = MVT::isVoid; 2315 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 && 2316 RetVT != MVT::i8 && RetVT != MVT::i1) 2317 return false; 2318 2319 // Can't handle non-double multi-reg retvals. 2320 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 && 2321 RetVT != MVT::i16 && RetVT != MVT::i32) { 2322 SmallVector<CCValAssign, 16> RVLocs; 2323 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context); 2324 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2325 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2326 return false; 2327 } 2328 2329 // Set up the argument vectors. 2330 SmallVector<Value*, 8> Args; 2331 SmallVector<unsigned, 8> ArgRegs; 2332 SmallVector<MVT, 8> ArgVTs; 2333 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2334 unsigned arg_size = CS.arg_size(); 2335 Args.reserve(arg_size); 2336 ArgRegs.reserve(arg_size); 2337 ArgVTs.reserve(arg_size); 2338 ArgFlags.reserve(arg_size); 2339 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 2340 i != e; ++i) { 2341 // If we're lowering a memory intrinsic instead of a regular call, skip the 2342 // last two arguments, which shouldn't be passed to the underlying function. 
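    // For the mem intrinsics handled here, those trailing operands are the
    // alignment and volatile flags.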
2343 if (IntrMemName && e-i <= 2) 2344 break; 2345 2346 ISD::ArgFlagsTy Flags; 2347 unsigned AttrInd = i - CS.arg_begin() + 1; 2348 if (CS.paramHasAttr(AttrInd, Attribute::SExt)) 2349 Flags.setSExt(); 2350 if (CS.paramHasAttr(AttrInd, Attribute::ZExt)) 2351 Flags.setZExt(); 2352 2353 // FIXME: Only handle *easy* calls for now. 2354 if (CS.paramHasAttr(AttrInd, Attribute::InReg) || 2355 CS.paramHasAttr(AttrInd, Attribute::StructRet) || 2356 CS.paramHasAttr(AttrInd, Attribute::SwiftSelf) || 2357 CS.paramHasAttr(AttrInd, Attribute::SwiftError) || 2358 CS.paramHasAttr(AttrInd, Attribute::Nest) || 2359 CS.paramHasAttr(AttrInd, Attribute::ByVal)) 2360 return false; 2361 2362 Type *ArgTy = (*i)->getType(); 2363 MVT ArgVT; 2364 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 && 2365 ArgVT != MVT::i1) 2366 return false; 2367 2368 unsigned Arg = getRegForValue(*i); 2369 if (Arg == 0) 2370 return false; 2371 2372 unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); 2373 Flags.setOrigAlign(OriginalAlignment); 2374 2375 Args.push_back(*i); 2376 ArgRegs.push_back(Arg); 2377 ArgVTs.push_back(ArgVT); 2378 ArgFlags.push_back(Flags); 2379 } 2380 2381 // Handle the arguments now that we've gotten them. 2382 SmallVector<unsigned, 4> RegArgs; 2383 unsigned NumBytes; 2384 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2385 RegArgs, CC, NumBytes, isVarArg)) 2386 return false; 2387 2388 bool UseReg = false; 2389 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee); 2390 if (!GV || Subtarget->genLongCalls()) UseReg = true; 2391 2392 unsigned CalleeReg = 0; 2393 if (UseReg) { 2394 if (IntrMemName) 2395 CalleeReg = getLibcallReg(IntrMemName); 2396 else 2397 CalleeReg = getRegForValue(Callee); 2398 2399 if (CalleeReg == 0) return false; 2400 } 2401 2402 // Issue the call. 2403 unsigned CallOpc = ARMSelectCallOp(UseReg); 2404 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2405 DbgLoc, TII.get(CallOpc)); 2406 2407 // ARM calls don't take a predicate, but tBL / tBLX do. 2408 if(isThumb2) 2409 AddDefaultPred(MIB); 2410 if (UseReg) 2411 MIB.addReg(CalleeReg); 2412 else if (!IntrMemName) 2413 MIB.addGlobalAddress(GV, 0, 0); 2414 else 2415 MIB.addExternalSymbol(IntrMemName, 0); 2416 2417 // Add implicit physical register uses to the call. 2418 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2419 MIB.addReg(RegArgs[i], RegState::Implicit); 2420 2421 // Add a register mask with the call-preserved registers. 2422 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2423 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC)); 2424 2425 // Finish off the call including any return values. 2426 SmallVector<unsigned, 4> UsedRegs; 2427 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg)) 2428 return false; 2429 2430 // Set all unused physreg defs as dead. 2431 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2432 2433 return true; 2434 } 2435 2436 bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) { 2437 return Len <= 16; 2438 } 2439 2440 bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, 2441 uint64_t Len, unsigned Alignment) { 2442 // Make sure we don't bloat code by inlining very large memcpy's. 
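  // ARMIsMemCpySmall caps inlined copies at 16 bytes; anything larger falls
  // back to a regular memcpy call.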
2443 if (!ARMIsMemCpySmall(Len)) 2444 return false; 2445 2446 while (Len) { 2447 MVT VT; 2448 if (!Alignment || Alignment >= 4) { 2449 if (Len >= 4) 2450 VT = MVT::i32; 2451 else if (Len >= 2) 2452 VT = MVT::i16; 2453 else { 2454 assert (Len == 1 && "Expected a length of 1!"); 2455 VT = MVT::i8; 2456 } 2457 } else { 2458 // Bound based on alignment. 2459 if (Len >= 2 && Alignment == 2) 2460 VT = MVT::i16; 2461 else { 2462 VT = MVT::i8; 2463 } 2464 } 2465 2466 bool RV; 2467 unsigned ResultReg; 2468 RV = ARMEmitLoad(VT, ResultReg, Src); 2469 assert (RV == true && "Should be able to handle this load."); 2470 RV = ARMEmitStore(VT, ResultReg, Dest); 2471 assert (RV == true && "Should be able to handle this store."); 2472 (void)RV; 2473 2474 unsigned Size = VT.getSizeInBits()/8; 2475 Len -= Size; 2476 Dest.Offset += Size; 2477 Src.Offset += Size; 2478 } 2479 2480 return true; 2481 } 2482 2483 bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { 2484 // FIXME: Handle more intrinsics. 2485 switch (I.getIntrinsicID()) { 2486 default: return false; 2487 case Intrinsic::frameaddress: { 2488 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo(); 2489 MFI.setFrameAddressIsTaken(true); 2490 2491 unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12; 2492 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass 2493 : &ARM::GPRRegClass; 2494 2495 const ARMBaseRegisterInfo *RegInfo = 2496 static_cast<const ARMBaseRegisterInfo *>(Subtarget->getRegisterInfo()); 2497 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF)); 2498 unsigned SrcReg = FramePtr; 2499 2500 // Recursively load frame address 2501 // ldr r0 [fp] 2502 // ldr r0 [r0] 2503 // ldr r0 [r0] 2504 // ... 2505 unsigned DestReg; 2506 unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue(); 2507 while (Depth--) { 2508 DestReg = createResultReg(RC); 2509 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2510 TII.get(LdrOpc), DestReg) 2511 .addReg(SrcReg).addImm(0)); 2512 SrcReg = DestReg; 2513 } 2514 updateValueMap(&I, SrcReg); 2515 return true; 2516 } 2517 case Intrinsic::memcpy: 2518 case Intrinsic::memmove: { 2519 const MemTransferInst &MTI = cast<MemTransferInst>(I); 2520 // Don't handle volatile. 2521 if (MTI.isVolatile()) 2522 return false; 2523 2524 // Disable inlining for memmove before calls to ComputeAddress. Otherwise, 2525 // we would emit dead code because we don't currently handle memmoves. 2526 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy); 2527 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) { 2528 // Small memcpy's are common enough that we want to do them without a call 2529 // if possible. 2530 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue(); 2531 if (ARMIsMemCpySmall(Len)) { 2532 Address Dest, Src; 2533 if (!ARMComputeAddress(MTI.getRawDest(), Dest) || 2534 !ARMComputeAddress(MTI.getRawSource(), Src)) 2535 return false; 2536 unsigned Alignment = MTI.getAlignment(); 2537 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment)) 2538 return true; 2539 } 2540 } 2541 2542 if (!MTI.getLength()->getType()->isIntegerTy(32)) 2543 return false; 2544 2545 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) 2546 return false; 2547 2548 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove"; 2549 return SelectCall(&I, IntrMemName); 2550 } 2551 case Intrinsic::memset: { 2552 const MemSetInst &MSI = cast<MemSetInst>(I); 2553 // Don't handle volatile. 
2554 if (MSI.isVolatile()) 2555 return false; 2556 2557 if (!MSI.getLength()->getType()->isIntegerTy(32)) 2558 return false; 2559 2560 if (MSI.getDestAddressSpace() > 255) 2561 return false; 2562 2563 return SelectCall(&I, "memset"); 2564 } 2565 case Intrinsic::trap: { 2566 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get( 2567 Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP)); 2568 return true; 2569 } 2570 } 2571 } 2572 2573 bool ARMFastISel::SelectTrunc(const Instruction *I) { 2574 // The high bits for a type smaller than the register size are assumed to be 2575 // undefined. 2576 Value *Op = I->getOperand(0); 2577 2578 EVT SrcVT, DestVT; 2579 SrcVT = TLI.getValueType(DL, Op->getType(), true); 2580 DestVT = TLI.getValueType(DL, I->getType(), true); 2581 2582 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 2583 return false; 2584 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 2585 return false; 2586 2587 unsigned SrcReg = getRegForValue(Op); 2588 if (!SrcReg) return false; 2589 2590 // Because the high bits are undefined, a truncate doesn't generate 2591 // any code. 2592 updateValueMap(I, SrcReg); 2593 return true; 2594 } 2595 2596 unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, 2597 bool isZExt) { 2598 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8) 2599 return 0; 2600 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1) 2601 return 0; 2602 2603 // Table of which combinations can be emitted as a single instruction, 2604 // and which will require two. 2605 static const uint8_t isSingleInstrTbl[3][2][2][2] = { 2606 // ARM Thumb 2607 // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops 2608 // ext: s z s z s z s z 2609 /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } }, 2610 /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }, 2611 /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } } 2612 }; 2613 2614 // Target registers for: 2615 // - For ARM can never be PC. 2616 // - For 16-bit Thumb are restricted to lower 8 registers. 2617 // - For 32-bit Thumb are restricted to non-SP and non-PC. 2618 static const TargetRegisterClass *RCTbl[2][2] = { 2619 // Instructions: Two Single 2620 /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass }, 2621 /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass } 2622 }; 2623 2624 // Table governing the instruction(s) to be emitted. 2625 static const struct InstructionTable { 2626 uint32_t Opc : 16; 2627 uint32_t hasS : 1; // Some instructions have an S bit, always set it to 0. 2628 uint32_t Shift : 7; // For shift operand addressing mode, used by MOVsi. 2629 uint32_t Imm : 8; // All instructions have either a shift or a mask. 2630 } IT[2][2][3][2] = { 2631 { // Two instructions (first is left shift, second is in this table). 
2632     { // ARM                Opc           S  Shift             Imm
2633       /*  1 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     , 31 },
2634       /*  1 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     , 31 } },
2635       /*  8 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     , 24 },
2636       /*  8 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     , 24 } },
2637       /* 16 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     , 16 },
2638       /* 16 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     , 16 } }
2639     },
2640     { // Thumb              Opc           S  Shift             Imm
2641       /*  1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 31 },
2642       /*  1 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } },
2643       /*  8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 24 },
2644       /*  8 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } },
2645       /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 16 },
2646       /* 16 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } }
2647     }
2648   },
2649   { // Single instruction.
2650     { // ARM                Opc           S  Shift             Imm
2651       /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,  0 },
2652       /*  1 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift,  1 } },
2653       /*  8 bit sext */ { { ARM::SXTB   , 0, ARM_AM::no_shift,  0 },
2654       /*  8 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift, 255 } },
2655       /* 16 bit sext */ { { ARM::SXTH   , 0, ARM_AM::no_shift,  0 },
2656       /* 16 bit zext */   { ARM::UXTH   , 0, ARM_AM::no_shift,  0 } }
2657     },
2658     { // Thumb              Opc           S  Shift             Imm
2659       /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,  0 },
2660       /*  1 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift,  1 } },
2661       /*  8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift,  0 },
2662       /*  8 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } },
2663       /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift,  0 },
2664       /* 16 bit zext */   { ARM::t2UXTH , 0, ARM_AM::no_shift,  0 } }
2665     }
2666   }
2667   };
2668
2669   unsigned SrcBits = SrcVT.getSizeInBits();
2670   unsigned DestBits = DestVT.getSizeInBits();
2671   (void) DestBits;
2672   assert((SrcBits < DestBits) && "can only extend to larger types");
2673   assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
2674          "other sizes unimplemented");
2675   assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
2676          "other sizes unimplemented");
2677
2678   bool hasV6Ops = Subtarget->hasV6Ops();
2679   unsigned Bitness = SrcBits / 8;  // {1,8,16}=>{0,1,2}
2680   assert((Bitness < 3) && "sanity-check table bounds");
2681
2682   bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
2683   const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
2684   const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
2685   unsigned Opc = ITP->Opc;
2686   assert(ARM::KILL != Opc && "Invalid table entry");
2687   unsigned hasS = ITP->hasS;
2688   ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift;
2689   assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
2690          "only MOVsi has shift operand addressing mode");
2691   unsigned Imm = ITP->Imm;
2692
2693   // 16-bit Thumb instructions always set CPSR (unless they're in an IT block).
2694   bool setsCPSR = &ARM::tGPRRegClass == RC;
2695   unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
2696   unsigned ResultReg;
2697   // MOVsi encodes shift and immediate in shift operand addressing mode.
2698   // The following condition has the same value when emitting two
2699   // instruction sequences: both are shifts.
2700   bool ImmIsSO = (Shift != ARM_AM::no_shift);
2701
2702   // Either one or two instructions are emitted.
2703   // They're always of the form:
2704   //   dst = in OP imm
2705   // CPSR is set only by 16-bit Thumb instructions.
2706 // Predicate, if any, is AL. 2707 // S bit, if available, is always 0. 2708 // When two are emitted the first's result will feed as the second's input, 2709 // that value is then dead. 2710 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2; 2711 for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) { 2712 ResultReg = createResultReg(RC); 2713 bool isLsl = (0 == Instr) && !isSingleInstr; 2714 unsigned Opcode = isLsl ? LSLOpc : Opc; 2715 ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift; 2716 unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm; 2717 bool isKill = 1 == Instr; 2718 MachineInstrBuilder MIB = BuildMI( 2719 *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg); 2720 if (setsCPSR) 2721 MIB.addReg(ARM::CPSR, RegState::Define); 2722 SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR); 2723 AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(ImmEnc)); 2724 if (hasS) 2725 AddDefaultCC(MIB); 2726 // Second instruction consumes the first's result. 2727 SrcReg = ResultReg; 2728 } 2729 2730 return ResultReg; 2731 } 2732 2733 bool ARMFastISel::SelectIntExt(const Instruction *I) { 2734 // On ARM, in general, integer casts don't involve legal types; this code 2735 // handles promotable integers. 2736 Type *DestTy = I->getType(); 2737 Value *Src = I->getOperand(0); 2738 Type *SrcTy = Src->getType(); 2739 2740 bool isZExt = isa<ZExtInst>(I); 2741 unsigned SrcReg = getRegForValue(Src); 2742 if (!SrcReg) return false; 2743 2744 EVT SrcEVT, DestEVT; 2745 SrcEVT = TLI.getValueType(DL, SrcTy, true); 2746 DestEVT = TLI.getValueType(DL, DestTy, true); 2747 if (!SrcEVT.isSimple()) return false; 2748 if (!DestEVT.isSimple()) return false; 2749 2750 MVT SrcVT = SrcEVT.getSimpleVT(); 2751 MVT DestVT = DestEVT.getSimpleVT(); 2752 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt); 2753 if (ResultReg == 0) return false; 2754 updateValueMap(I, ResultReg); 2755 return true; 2756 } 2757 2758 bool ARMFastISel::SelectShift(const Instruction *I, 2759 ARM_AM::ShiftOpc ShiftTy) { 2760 // We handle thumb2 mode by target independent selector 2761 // or SelectionDAG ISel. 2762 if (isThumb2) 2763 return false; 2764 2765 // Only handle i32 now. 2766 EVT DestVT = TLI.getValueType(DL, I->getType(), true); 2767 if (DestVT != MVT::i32) 2768 return false; 2769 2770 unsigned Opc = ARM::MOVsr; 2771 unsigned ShiftImm; 2772 Value *Src2Value = I->getOperand(1); 2773 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) { 2774 ShiftImm = CI->getZExtValue(); 2775 2776 // Fall back to selection DAG isel if the shift amount 2777 // is zero or greater than the width of the value type. 
2778 if (ShiftImm == 0 || ShiftImm >=32) 2779 return false; 2780 2781 Opc = ARM::MOVsi; 2782 } 2783 2784 Value *Src1Value = I->getOperand(0); 2785 unsigned Reg1 = getRegForValue(Src1Value); 2786 if (Reg1 == 0) return false; 2787 2788 unsigned Reg2 = 0; 2789 if (Opc == ARM::MOVsr) { 2790 Reg2 = getRegForValue(Src2Value); 2791 if (Reg2 == 0) return false; 2792 } 2793 2794 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); 2795 if(ResultReg == 0) return false; 2796 2797 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2798 TII.get(Opc), ResultReg) 2799 .addReg(Reg1); 2800 2801 if (Opc == ARM::MOVsi) 2802 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm)); 2803 else if (Opc == ARM::MOVsr) { 2804 MIB.addReg(Reg2); 2805 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0)); 2806 } 2807 2808 AddOptionalDefs(MIB); 2809 updateValueMap(I, ResultReg); 2810 return true; 2811 } 2812 2813 // TODO: SoftFP support. 2814 bool ARMFastISel::fastSelectInstruction(const Instruction *I) { 2815 2816 switch (I->getOpcode()) { 2817 case Instruction::Load: 2818 return SelectLoad(I); 2819 case Instruction::Store: 2820 return SelectStore(I); 2821 case Instruction::Br: 2822 return SelectBranch(I); 2823 case Instruction::IndirectBr: 2824 return SelectIndirectBr(I); 2825 case Instruction::ICmp: 2826 case Instruction::FCmp: 2827 return SelectCmp(I); 2828 case Instruction::FPExt: 2829 return SelectFPExt(I); 2830 case Instruction::FPTrunc: 2831 return SelectFPTrunc(I); 2832 case Instruction::SIToFP: 2833 return SelectIToFP(I, /*isSigned*/ true); 2834 case Instruction::UIToFP: 2835 return SelectIToFP(I, /*isSigned*/ false); 2836 case Instruction::FPToSI: 2837 return SelectFPToI(I, /*isSigned*/ true); 2838 case Instruction::FPToUI: 2839 return SelectFPToI(I, /*isSigned*/ false); 2840 case Instruction::Add: 2841 return SelectBinaryIntOp(I, ISD::ADD); 2842 case Instruction::Or: 2843 return SelectBinaryIntOp(I, ISD::OR); 2844 case Instruction::Sub: 2845 return SelectBinaryIntOp(I, ISD::SUB); 2846 case Instruction::FAdd: 2847 return SelectBinaryFPOp(I, ISD::FADD); 2848 case Instruction::FSub: 2849 return SelectBinaryFPOp(I, ISD::FSUB); 2850 case Instruction::FMul: 2851 return SelectBinaryFPOp(I, ISD::FMUL); 2852 case Instruction::SDiv: 2853 return SelectDiv(I, /*isSigned*/ true); 2854 case Instruction::UDiv: 2855 return SelectDiv(I, /*isSigned*/ false); 2856 case Instruction::SRem: 2857 return SelectRem(I, /*isSigned*/ true); 2858 case Instruction::URem: 2859 return SelectRem(I, /*isSigned*/ false); 2860 case Instruction::Call: 2861 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2862 return SelectIntrinsicCall(*II); 2863 return SelectCall(I); 2864 case Instruction::Select: 2865 return SelectSelect(I); 2866 case Instruction::Ret: 2867 return SelectRet(I); 2868 case Instruction::Trunc: 2869 return SelectTrunc(I); 2870 case Instruction::ZExt: 2871 case Instruction::SExt: 2872 return SelectIntExt(I); 2873 case Instruction::Shl: 2874 return SelectShift(I, ARM_AM::lsl); 2875 case Instruction::LShr: 2876 return SelectShift(I, ARM_AM::lsr); 2877 case Instruction::AShr: 2878 return SelectShift(I, ARM_AM::asr); 2879 default: break; 2880 } 2881 return false; 2882 } 2883 2884 namespace { 2885 // This table describes sign- and zero-extend instructions which can be 2886 // folded into a preceding load. All of these extends have an immediate 2887 // (sometimes a mask and sometimes a shift) that's applied after 2888 // extension. 
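// Each entry gives the ARM and Thumb2 opcodes, the immediate the extend must
// carry, whether it is a zero-extend, and the memory type of the load it can
// absorb.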
2889 const struct FoldableLoadExtendsStruct { 2890 uint16_t Opc[2]; // ARM, Thumb. 2891 uint8_t ExpectedImm; 2892 uint8_t isZExt : 1; 2893 uint8_t ExpectedVT : 7; 2894 } FoldableLoadExtends[] = { 2895 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 }, 2896 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 }, 2897 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 }, 2898 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 }, 2899 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 } 2900 }; 2901 } 2902 2903 /// \brief The specified machine instr operand is a vreg, and that 2904 /// vreg is being provided by the specified load instruction. If possible, 2905 /// try to fold the load as an operand to the instruction, returning true if 2906 /// successful. 2907 bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, 2908 const LoadInst *LI) { 2909 // Verify we have a legal type before going any further. 2910 MVT VT; 2911 if (!isLoadTypeLegal(LI->getType(), VT)) 2912 return false; 2913 2914 // Combine load followed by zero- or sign-extend. 2915 // ldrb r1, [r0] ldrb r1, [r0] 2916 // uxtb r2, r1 => 2917 // mov r3, r2 mov r3, r1 2918 if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm()) 2919 return false; 2920 const uint64_t Imm = MI->getOperand(2).getImm(); 2921 2922 bool Found = false; 2923 bool isZExt; 2924 for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends); 2925 i != e; ++i) { 2926 if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() && 2927 (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm && 2928 MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) { 2929 Found = true; 2930 isZExt = FoldableLoadExtends[i].isZExt; 2931 } 2932 } 2933 if (!Found) return false; 2934 2935 // See if we can handle this address. 2936 Address Addr; 2937 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false; 2938 2939 unsigned ResultReg = MI->getOperand(0).getReg(); 2940 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false)) 2941 return false; 2942 MI->eraseFromParent(); 2943 return true; 2944 } 2945 2946 unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, 2947 unsigned Align, MVT VT) { 2948 bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV); 2949 2950 LLVMContext *Context = &MF->getFunction()->getContext(); 2951 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2952 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 2953 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create( 2954 GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj, 2955 UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier, 2956 /*AddCurrentAddress=*/UseGOT_PREL); 2957 2958 unsigned ConstAlign = 2959 MF->getDataLayout().getPrefTypeAlignment(Type::getInt32PtrTy(*Context)); 2960 unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign); 2961 2962 unsigned TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass); 2963 unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp; 2964 MachineInstrBuilder MIB = 2965 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg) 2966 .addConstantPoolIndex(Idx); 2967 if (Opc == ARM::LDRcp) 2968 MIB.addImm(0); 2969 AddDefaultPred(MIB); 2970 2971 // Fix the address by adding pc. 2972 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 2973 Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? 
ARM::PICLDR 2974 : ARM::PICADD; 2975 DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0); 2976 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) 2977 .addReg(TempReg) 2978 .addImm(ARMPCLabelIndex); 2979 if (!Subtarget->isThumb()) 2980 AddDefaultPred(MIB); 2981 2982 if (UseGOT_PREL && Subtarget->isThumb()) { 2983 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 2984 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2985 TII.get(ARM::t2LDRi12), NewDestReg) 2986 .addReg(DestReg) 2987 .addImm(0); 2988 DestReg = NewDestReg; 2989 AddOptionalDefs(MIB); 2990 } 2991 return DestReg; 2992 } 2993 2994 bool ARMFastISel::fastLowerArguments() { 2995 if (!FuncInfo.CanLowerReturn) 2996 return false; 2997 2998 const Function *F = FuncInfo.Fn; 2999 if (F->isVarArg()) 3000 return false; 3001 3002 CallingConv::ID CC = F->getCallingConv(); 3003 switch (CC) { 3004 default: 3005 return false; 3006 case CallingConv::Fast: 3007 case CallingConv::C: 3008 case CallingConv::ARM_AAPCS_VFP: 3009 case CallingConv::ARM_AAPCS: 3010 case CallingConv::ARM_APCS: 3011 case CallingConv::Swift: 3012 break; 3013 } 3014 3015 // Only handle simple cases. i.e. Up to 4 i8/i16/i32 scalar arguments 3016 // which are passed in r0 - r3. 3017 unsigned Idx = 1; 3018 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 3019 I != E; ++I, ++Idx) { 3020 if (Idx > 4) 3021 return false; 3022 3023 if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) || 3024 F->getAttributes().hasAttribute(Idx, Attribute::StructRet) || 3025 F->getAttributes().hasAttribute(Idx, Attribute::SwiftSelf) || 3026 F->getAttributes().hasAttribute(Idx, Attribute::SwiftError) || 3027 F->getAttributes().hasAttribute(Idx, Attribute::ByVal)) 3028 return false; 3029 3030 Type *ArgTy = I->getType(); 3031 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) 3032 return false; 3033 3034 EVT ArgVT = TLI.getValueType(DL, ArgTy); 3035 if (!ArgVT.isSimple()) return false; 3036 switch (ArgVT.getSimpleVT().SimpleTy) { 3037 case MVT::i8: 3038 case MVT::i16: 3039 case MVT::i32: 3040 break; 3041 default: 3042 return false; 3043 } 3044 } 3045 3046 3047 static const MCPhysReg GPRArgRegs[] = { 3048 ARM::R0, ARM::R1, ARM::R2, ARM::R3 3049 }; 3050 3051 const TargetRegisterClass *RC = &ARM::rGPRRegClass; 3052 Idx = 0; 3053 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 3054 I != E; ++I, ++Idx) { 3055 unsigned SrcReg = GPRArgRegs[Idx]; 3056 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC); 3057 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy. 3058 // Without this, EmitLiveInCopies may eliminate the livein if its only 3059 // use is a bitcast (which isn't turned into an instruction). 3060 unsigned ResultReg = createResultReg(RC); 3061 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 3062 TII.get(TargetOpcode::COPY), 3063 ResultReg).addReg(DstReg, getKillRegState(true)); 3064 updateValueMap(&*I, ResultReg); 3065 } 3066 3067 return true; 3068 } 3069 3070 namespace llvm { 3071 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo, 3072 const TargetLibraryInfo *libInfo) { 3073 if (funcInfo.MF->getSubtarget<ARMSubtarget>().useFastISel()) 3074 return new ARMFastISel(funcInfo, libInfo); 3075 3076 return nullptr; 3077 } 3078 } 3079