//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
       Base.Reg = 0;
     }
  } Address;

class ARMFastISel final : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  Module &M;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
        : FastISel(funcInfo, libInfo),
          Subtarget(
              &static_cast<const ARMSubtarget &>(funcInfo.MF->getSubtarget())),
          M(const_cast<Module &>(*funcInfo.Fn->getParent())),
          TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
          TLI(*Subtarget->getTargetLowering()) {
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
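    // These helpers re-implement the FastISel instruction emitters so that
    // every machine instruction built here is routed through AddOptionalDefs,
    // which appends the default ARM predicate (and, where required, the
    // optional CC/CPSR def operand) that plain FastISel emission would omit.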
  private:
    unsigned fastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);
    unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              uint64_t Imm);
    unsigned fastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);

    // Backend specific FastISel code.
  private:
    bool fastSelectInstruction(const Instruction *I) override;
    unsigned fastMaterializeConstant(const Constant *C) override;
    unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
    bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                             const LoadInst *LI) override;
    bool fastLowerArguments() override;
  private:
  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.
  private:
    bool isPositionIndependent() const;
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                               unsigned Alignment);
    unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
    unsigned ARMMaterializeInt(const Constant *C, MVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
    unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);
    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);

    const TargetLowering *getTargetLowering() { return &TLI; }

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(MVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we'll be handled via isPredicable.
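  // NEON instructions in ARM mode are never truly predicable (they must run
  // with the AL condition), but they may still carry a predicate operand that
  // AddOptionalDefs has to fill in, so fall through to scanning the operand
  // descriptions below.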
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return MI->isPredicable();

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable go ahead and add the predicate
// operands; if it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
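  // constrainOperandRegClass may rewrite the source registers: if a virtual
  // register was created with a wider class than this opcode accepts (e.g.
  // GPR where Thumb2 wants rGPR), it is constrained, or copied into a fresh
  // register of a suitable class.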
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addReg(Op0, Op0IsKill * RegState::Kill)
                   .addReg(Op1, Op1IsKill * RegState::Kill)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                   .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
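// Note: VMOVSR/VMOVRS below only transfer 32 bits between a core register and
// a single-precision VFP register, which is why the 64-bit cases bail out
// with 0 (failure) for now.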
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = DL.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
      &ARM::GPRRegClass;
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      const TargetRegisterClass *RC = isThumb2 ?
        &ARM::rGPRRegClass :
        &ARM::GPRRegClass;
      unsigned ImmReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  unsigned ResultReg = 0;
  if (Subtarget->useMovt(*FuncInfo.MF))
    ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());

  if (ResultReg)
    return ResultReg;

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = DL.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
  ResultReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::t2LDRpci), ResultReg)
                      .addConstantPoolIndex(Idx));
  else {
    // The extra immediate is for addrmode2.
    ResultReg = constrainOperandRegClass(TII.get(ARM::LDRcp), ResultReg, 0);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::LDRcp), ResultReg)
                      .addConstantPoolIndex(Idx)
                      .addImm(0));
  }
  return ResultReg;
}

bool ARMFastISel::isPositionIndependent() const {
  return TLI.isPositionIndependent();
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32 || GV->isThreadLocal()) return 0;

  bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);

  // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;

  bool IsPositionIndependent = isPositionIndependent();
  // Use movw+movt when possible, it avoids constant pool entries.
  // Non-darwin targets only support static movt relocations in FastISel.
  if (Subtarget->useMovt(*FuncInfo.MF) &&
      (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
    unsigned Opc;
    unsigned char TF = 0;
    if (Subtarget->isTargetMachO())
      TF = ARMII::MO_NONLAZY;

    if (IsPositionIndependent)
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
    else
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = DL.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = DL.getTypeAllocSize(GV->getType());
    }

    if (Subtarget->isTargetELF() && IsPositionIndependent)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab index.
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
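    // Rough sketch of what the ARM-mode PIC case below emits (register names
    // are illustrative only):
    //   LDRcp   rTmp, <constant-pool entry for GV>
    //   PICADD/PICLDR rDest, rTmp, <pic label id>   ; PICLDR if GV is indirect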
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                    DestReg).addConstantPoolIndex(Idx);
      if (IsPositionIndependent)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRcp), DestReg)
                .addConstantPoolIndex(Idx)
                .addImm(0);
      AddOptionalDefs(MIB);

      if (IsPositionIndependent) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));

        MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                          DbgLoc, TII.get(Opc), NewDestReg)
                                      .addReg(DestReg)
                                      .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if (IsIndirect) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::t2LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
          TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = DL.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (canFoldAddIntoGEP(U, Op)) {
              // A compatible add with a constant operand. Fold the constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags,
        MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ?
                  (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load.  Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
                                              : &ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ?
                 ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
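      // Callers (SelectCmp/SelectBranch) treat ARMCC::AL as "unsupported" and
      // bail out; FCMP_ONE and FCMP_UEQ would each need two condition checks,
      // which this simple one-predicate mapping does not emit.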
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
      .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ?
                                            FBB : TBB;
    fastEmitBranch(Target, DbgLoc);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
          .addReg(CmpReg)
          .addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
  .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  finishCondBranch(BI->getParent(), TBB, FBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc)).addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (const BasicBlock *SuccBB : IB->successors())
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]);

  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(DL, Ty, true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ?
               (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
      // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  const MCInstrDesc &II = TII.get(CmpOpc);
  SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
  if (!UseImm) {
    SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = fastMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
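  // The sequence below is roughly (virtual register names illustrative only):
  //   %zero = <materialized i32 0>
  //   %dest = MOVCCi %zero, 1, <ARMPred>, %CPSR
  // i.e. start from 0 and conditionally overwrite it with 1 when the
  // predicate established by the compare holds.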
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
          .addReg(ZeroReg).addImm(1)
          .addImm(ARMPred).addReg(ARM::CPSR);

  updateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  updateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  updateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);
  if (!SrcEVT.isSimple())
    return false;
  MVT SrcVT = SrcEVT.getSimpleVT();
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(FP));
  updateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ?
                                               ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  updateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
          .addReg(CondReg)
          .addImm(1));

  unsigned MovCCOpc;
  const TargetRegisterClass *RC;
  if (!UseImm) {
    RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
    if (!isNegativeImm)
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    else
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm) {
    Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op2Reg)
        .addReg(Op1Reg)
        .addImm(ARMCC::NE)
        .addReg(ARM::CPSR);
  } else {
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op1Reg)
        .addImm(Imm)
        .addImm(ARMCC::EQ)
        .addReg(ARM::CPSR);
  }
  updateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
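  // For example, a signed i32 sdiv maps to RTLIB::SDIV_I32, which typically
  // resolves to __divsi3 (or __aeabi_idiv on AEABI targets); ARMEmitLibcall
  // then emits the actual call.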
1699 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1700 if (VT == MVT::i8) 1701 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8; 1702 else if (VT == MVT::i16) 1703 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16; 1704 else if (VT == MVT::i32) 1705 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32; 1706 else if (VT == MVT::i64) 1707 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64; 1708 else if (VT == MVT::i128) 1709 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128; 1710 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!"); 1711 1712 return ARMEmitLibcall(I, LC); 1713 } 1714 1715 bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) { 1716 MVT VT; 1717 Type *Ty = I->getType(); 1718 if (!isTypeLegal(Ty, VT)) 1719 return false; 1720 1721 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1722 if (VT == MVT::i8) 1723 LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8; 1724 else if (VT == MVT::i16) 1725 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16; 1726 else if (VT == MVT::i32) 1727 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32; 1728 else if (VT == MVT::i64) 1729 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64; 1730 else if (VT == MVT::i128) 1731 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128; 1732 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!"); 1733 1734 return ARMEmitLibcall(I, LC); 1735 } 1736 1737 bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) { 1738 EVT DestVT = TLI.getValueType(DL, I->getType(), true); 1739 1740 // We can get here in the case when we have a binary operation on a non-legal 1741 // type and the target independent selector doesn't know how to handle it. 1742 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 1743 return false; 1744 1745 unsigned Opc; 1746 switch (ISDOpcode) { 1747 default: return false; 1748 case ISD::ADD: 1749 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr; 1750 break; 1751 case ISD::OR: 1752 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr; 1753 break; 1754 case ISD::SUB: 1755 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr; 1756 break; 1757 } 1758 1759 unsigned SrcReg1 = getRegForValue(I->getOperand(0)); 1760 if (SrcReg1 == 0) return false; 1761 1762 // TODO: Often the 2nd operand is an immediate, which can be encoded directly 1763 // in the instruction, rather then materializing the value in a register. 1764 unsigned SrcReg2 = getRegForValue(I->getOperand(1)); 1765 if (SrcReg2 == 0) return false; 1766 1767 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); 1768 SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1); 1769 SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2); 1770 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1771 TII.get(Opc), ResultReg) 1772 .addReg(SrcReg1).addReg(SrcReg2)); 1773 updateValueMap(I, ResultReg); 1774 return true; 1775 } 1776 1777 bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) { 1778 EVT FPVT = TLI.getValueType(DL, I->getType(), true); 1779 if (!FPVT.isSimple()) return false; 1780 MVT VT = FPVT.getSimpleVT(); 1781 1782 // FIXME: Support vector types where possible. 1783 if (VT.isVector()) 1784 return false; 1785 1786 // We can get here in the case when we want to use NEON for our fp 1787 // operations, but can't figure out how to. Just use the vfp instructions 1788 // if we have them. 1789 // FIXME: It'd be nice to use NEON instructions. 
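  // Scalar float/double arithmetic needs VFP2 here; without it we return
  // false and leave the instruction to the default selector.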
1790 Type *Ty = I->getType(); 1791 bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy()); 1792 if (isFloat && !Subtarget->hasVFP2()) 1793 return false; 1794 1795 unsigned Opc; 1796 bool is64bit = VT == MVT::f64 || VT == MVT::i64; 1797 switch (ISDOpcode) { 1798 default: return false; 1799 case ISD::FADD: 1800 Opc = is64bit ? ARM::VADDD : ARM::VADDS; 1801 break; 1802 case ISD::FSUB: 1803 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS; 1804 break; 1805 case ISD::FMUL: 1806 Opc = is64bit ? ARM::VMULD : ARM::VMULS; 1807 break; 1808 } 1809 unsigned Op1 = getRegForValue(I->getOperand(0)); 1810 if (Op1 == 0) return false; 1811 1812 unsigned Op2 = getRegForValue(I->getOperand(1)); 1813 if (Op2 == 0) return false; 1814 1815 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy)); 1816 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1817 TII.get(Opc), ResultReg) 1818 .addReg(Op1).addReg(Op2)); 1819 updateValueMap(I, ResultReg); 1820 return true; 1821 } 1822 1823 // Call Handling Code 1824 1825 // This is largely taken directly from CCAssignFnForNode 1826 // TODO: We may not support all of this. 1827 CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, 1828 bool Return, 1829 bool isVarArg) { 1830 switch (CC) { 1831 default: 1832 llvm_unreachable("Unsupported calling convention"); 1833 case CallingConv::Fast: 1834 if (Subtarget->hasVFP2() && !isVarArg) { 1835 if (!Subtarget->isAAPCS_ABI()) 1836 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1837 // For AAPCS ABI targets, just use VFP variant of the calling convention. 1838 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1839 } 1840 // Fallthrough 1841 case CallingConv::C: 1842 case CallingConv::CXX_FAST_TLS: 1843 // Use target triple & subtarget features to do actual dispatch. 1844 if (Subtarget->isAAPCS_ABI()) { 1845 if (Subtarget->hasVFP2() && 1846 TM.Options.FloatABIType == FloatABI::Hard && !isVarArg) 1847 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1848 else 1849 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1850 } else { 1851 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1852 } 1853 case CallingConv::ARM_AAPCS_VFP: 1854 case CallingConv::Swift: 1855 if (!isVarArg) 1856 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1857 // Fall through to soft float variant, variadic functions don't 1858 // use hard floating point ABI. 1859 case CallingConv::ARM_AAPCS: 1860 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1861 case CallingConv::ARM_APCS: 1862 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1863 case CallingConv::GHC: 1864 if (Return) 1865 llvm_unreachable("Can't return in GHC call convention"); 1866 else 1867 return CC_ARM_APCS_GHC; 1868 } 1869 } 1870 1871 bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, 1872 SmallVectorImpl<unsigned> &ArgRegs, 1873 SmallVectorImpl<MVT> &ArgVTs, 1874 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 1875 SmallVectorImpl<unsigned> &RegArgs, 1876 CallingConv::ID CC, 1877 unsigned &NumBytes, 1878 bool isVarArg) { 1879 SmallVector<CCValAssign, 16> ArgLocs; 1880 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context); 1881 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, 1882 CCAssignFnForCall(CC, false, isVarArg)); 1883 1884 // Check that we can handle all of the arguments. If we can't, then bail out 1885 // now before we add code to the MBB. 
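  // This first pass over the assigned locations is purely a legality check:
  // nothing is emitted yet, so bailing out here leaves the block untouched.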
1886 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1887 CCValAssign &VA = ArgLocs[i]; 1888 MVT ArgVT = ArgVTs[VA.getValNo()]; 1889 1890 // We don't handle NEON/vector parameters yet. 1891 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64) 1892 return false; 1893 1894 // Now copy/store arg to correct locations. 1895 if (VA.isRegLoc() && !VA.needsCustom()) { 1896 continue; 1897 } else if (VA.needsCustom()) { 1898 // TODO: We need custom lowering for vector (v2f64) args. 1899 if (VA.getLocVT() != MVT::f64 || 1900 // TODO: Only handle register args for now. 1901 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc()) 1902 return false; 1903 } else { 1904 switch (ArgVT.SimpleTy) { 1905 default: 1906 return false; 1907 case MVT::i1: 1908 case MVT::i8: 1909 case MVT::i16: 1910 case MVT::i32: 1911 break; 1912 case MVT::f32: 1913 if (!Subtarget->hasVFP2()) 1914 return false; 1915 break; 1916 case MVT::f64: 1917 if (!Subtarget->hasVFP2()) 1918 return false; 1919 break; 1920 } 1921 } 1922 } 1923 1924 // At the point, we are able to handle the call's arguments in fast isel. 1925 1926 // Get a count of how many bytes are to be pushed on the stack. 1927 NumBytes = CCInfo.getNextStackOffset(); 1928 1929 // Issue CALLSEQ_START 1930 unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); 1931 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1932 TII.get(AdjStackDown)) 1933 .addImm(NumBytes)); 1934 1935 // Process the args. 1936 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1937 CCValAssign &VA = ArgLocs[i]; 1938 const Value *ArgVal = Args[VA.getValNo()]; 1939 unsigned Arg = ArgRegs[VA.getValNo()]; 1940 MVT ArgVT = ArgVTs[VA.getValNo()]; 1941 1942 assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) && 1943 "We don't handle NEON/vector parameters yet."); 1944 1945 // Handle arg promotion, etc. 1946 switch (VA.getLocInfo()) { 1947 case CCValAssign::Full: break; 1948 case CCValAssign::SExt: { 1949 MVT DestVT = VA.getLocVT(); 1950 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false); 1951 assert (Arg != 0 && "Failed to emit a sext"); 1952 ArgVT = DestVT; 1953 break; 1954 } 1955 case CCValAssign::AExt: 1956 // Intentional fall-through. Handle AExt and ZExt. 1957 case CCValAssign::ZExt: { 1958 MVT DestVT = VA.getLocVT(); 1959 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true); 1960 assert (Arg != 0 && "Failed to emit a zext"); 1961 ArgVT = DestVT; 1962 break; 1963 } 1964 case CCValAssign::BCvt: { 1965 unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg, 1966 /*TODO: Kill=*/false); 1967 assert(BC != 0 && "Failed to emit a bitcast!"); 1968 Arg = BC; 1969 ArgVT = VA.getLocVT(); 1970 break; 1971 } 1972 default: llvm_unreachable("Unknown arg promotion!"); 1973 } 1974 1975 // Now copy/store arg to correct locations. 1976 if (VA.isRegLoc() && !VA.needsCustom()) { 1977 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1978 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg); 1979 RegArgs.push_back(VA.getLocReg()); 1980 } else if (VA.needsCustom()) { 1981 // TODO: We need custom lowering for vector (v2f64) args. 
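      // An f64 argument assigned to a register pair is split with VMOVRRD
      // into the two consecutive locations reserved by the calling convention.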
1982 assert(VA.getLocVT() == MVT::f64 && 1983 "Custom lowering for v2f64 args not available"); 1984 1985 CCValAssign &NextVA = ArgLocs[++i]; 1986 1987 assert(VA.isRegLoc() && NextVA.isRegLoc() && 1988 "We only handle register args!"); 1989 1990 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1991 TII.get(ARM::VMOVRRD), VA.getLocReg()) 1992 .addReg(NextVA.getLocReg(), RegState::Define) 1993 .addReg(Arg)); 1994 RegArgs.push_back(VA.getLocReg()); 1995 RegArgs.push_back(NextVA.getLocReg()); 1996 } else { 1997 assert(VA.isMemLoc()); 1998 // Need to store on the stack. 1999 2000 // Don't emit stores for undef values. 2001 if (isa<UndefValue>(ArgVal)) 2002 continue; 2003 2004 Address Addr; 2005 Addr.BaseType = Address::RegBase; 2006 Addr.Base.Reg = ARM::SP; 2007 Addr.Offset = VA.getLocMemOffset(); 2008 2009 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet; 2010 assert(EmitRet && "Could not emit a store for argument!"); 2011 } 2012 } 2013 2014 return true; 2015 } 2016 2017 bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 2018 const Instruction *I, CallingConv::ID CC, 2019 unsigned &NumBytes, bool isVarArg) { 2020 // Issue CALLSEQ_END 2021 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 2022 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2023 TII.get(AdjStackUp)) 2024 .addImm(NumBytes).addImm(0)); 2025 2026 // Now the return value. 2027 if (RetVT != MVT::isVoid) { 2028 SmallVector<CCValAssign, 16> RVLocs; 2029 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context); 2030 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2031 2032 // Copy all of the result registers out of their specified physreg. 2033 if (RVLocs.size() == 2 && RetVT == MVT::f64) { 2034 // For this move we copy into two registers and then move into the 2035 // double fp reg we want. 2036 MVT DestVT = RVLocs[0].getValVT(); 2037 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); 2038 unsigned ResultReg = createResultReg(DstRC); 2039 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2040 TII.get(ARM::VMOVDRR), ResultReg) 2041 .addReg(RVLocs[0].getLocReg()) 2042 .addReg(RVLocs[1].getLocReg())); 2043 2044 UsedRegs.push_back(RVLocs[0].getLocReg()); 2045 UsedRegs.push_back(RVLocs[1].getLocReg()); 2046 2047 // Finally update the result. 2048 updateValueMap(I, ResultReg); 2049 } else { 2050 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!"); 2051 MVT CopyVT = RVLocs[0].getValVT(); 2052 2053 // Special handling for extended integers. 2054 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16) 2055 CopyVT = MVT::i32; 2056 2057 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); 2058 2059 unsigned ResultReg = createResultReg(DstRC); 2060 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2061 TII.get(TargetOpcode::COPY), 2062 ResultReg).addReg(RVLocs[0].getLocReg()); 2063 UsedRegs.push_back(RVLocs[0].getLocReg()); 2064 2065 // Finally update the result. 
2066 updateValueMap(I, ResultReg); 2067 } 2068 } 2069 2070 return true; 2071 } 2072 2073 bool ARMFastISel::SelectRet(const Instruction *I) { 2074 const ReturnInst *Ret = cast<ReturnInst>(I); 2075 const Function &F = *I->getParent()->getParent(); 2076 2077 if (!FuncInfo.CanLowerReturn) 2078 return false; 2079 2080 if (TLI.supportSwiftError() && 2081 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) 2082 return false; 2083 2084 if (TLI.supportSplitCSR(FuncInfo.MF)) 2085 return false; 2086 2087 // Build a list of return value registers. 2088 SmallVector<unsigned, 4> RetRegs; 2089 2090 CallingConv::ID CC = F.getCallingConv(); 2091 if (Ret->getNumOperands() > 0) { 2092 SmallVector<ISD::OutputArg, 4> Outs; 2093 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL); 2094 2095 // Analyze operands of the call, assigning locations to each operand. 2096 SmallVector<CCValAssign, 16> ValLocs; 2097 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext()); 2098 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */, 2099 F.isVarArg())); 2100 2101 const Value *RV = Ret->getOperand(0); 2102 unsigned Reg = getRegForValue(RV); 2103 if (Reg == 0) 2104 return false; 2105 2106 // Only handle a single return value for now. 2107 if (ValLocs.size() != 1) 2108 return false; 2109 2110 CCValAssign &VA = ValLocs[0]; 2111 2112 // Don't bother handling odd stuff for now. 2113 if (VA.getLocInfo() != CCValAssign::Full) 2114 return false; 2115 // Only handle register returns for now. 2116 if (!VA.isRegLoc()) 2117 return false; 2118 2119 unsigned SrcReg = Reg + VA.getValNo(); 2120 EVT RVEVT = TLI.getValueType(DL, RV->getType()); 2121 if (!RVEVT.isSimple()) return false; 2122 MVT RVVT = RVEVT.getSimpleVT(); 2123 MVT DestVT = VA.getValVT(); 2124 // Special handling for extended integers. 2125 if (RVVT != DestVT) { 2126 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) 2127 return false; 2128 2129 assert(DestVT == MVT::i32 && "ARM should always ext to i32"); 2130 2131 // Perform extension if flagged as either zext or sext. Otherwise, do 2132 // nothing. 2133 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) { 2134 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt()); 2135 if (SrcReg == 0) return false; 2136 } 2137 } 2138 2139 // Make the copy. 2140 unsigned DstReg = VA.getLocReg(); 2141 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); 2142 // Avoid a cross-class copy. This is very unlikely. 2143 if (!SrcRC->contains(DstReg)) 2144 return false; 2145 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2146 TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg); 2147 2148 // Add register to return instruction. 2149 RetRegs.push_back(VA.getLocReg()); 2150 } 2151 2152 unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET; 2153 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2154 TII.get(RetOpc)); 2155 AddOptionalDefs(MIB); 2156 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) 2157 MIB.addReg(RetRegs[i], RegState::Implicit); 2158 return true; 2159 } 2160 2161 unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) { 2162 if (UseReg) 2163 return isThumb2 ? ARM::tBLXr : ARM::BLX; 2164 else 2165 return isThumb2 ? ARM::tBL : ARM::BL; 2166 } 2167 2168 unsigned ARMFastISel::getLibcallReg(const Twine &Name) { 2169 // Manually compute the global's type to avoid building it when unnecessary. 
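  // The libcall is modelled as an external i32 global; ARMMaterializeGV then
  // produces its address the same way it would for any other global value.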
2170 Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0); 2171 EVT LCREVT = TLI.getValueType(DL, GVTy); 2172 if (!LCREVT.isSimple()) return 0; 2173 2174 GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false, 2175 GlobalValue::ExternalLinkage, nullptr, 2176 Name); 2177 assert(GV->getType() == GVTy && "We miscomputed the type for the global!"); 2178 return ARMMaterializeGV(GV, LCREVT.getSimpleVT()); 2179 } 2180 2181 // A quick function that will emit a call for a named libcall in F with the 2182 // vector of passed arguments for the Instruction in I. We can assume that we 2183 // can emit a call for any libcall we can produce. This is an abridged version 2184 // of the full call infrastructure since we won't need to worry about things 2185 // like computed function pointers or strange arguments at call sites. 2186 // TODO: Try to unify this and the normal call bits for ARM, then try to unify 2187 // with X86. 2188 bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { 2189 CallingConv::ID CC = TLI.getLibcallCallingConv(Call); 2190 2191 // Handle *simple* calls for now. 2192 Type *RetTy = I->getType(); 2193 MVT RetVT; 2194 if (RetTy->isVoidTy()) 2195 RetVT = MVT::isVoid; 2196 else if (!isTypeLegal(RetTy, RetVT)) 2197 return false; 2198 2199 // Can't handle non-double multi-reg retvals. 2200 if (RetVT != MVT::isVoid && RetVT != MVT::i32) { 2201 SmallVector<CCValAssign, 16> RVLocs; 2202 CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context); 2203 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false)); 2204 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2205 return false; 2206 } 2207 2208 // Set up the argument vectors. 2209 SmallVector<Value*, 8> Args; 2210 SmallVector<unsigned, 8> ArgRegs; 2211 SmallVector<MVT, 8> ArgVTs; 2212 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2213 Args.reserve(I->getNumOperands()); 2214 ArgRegs.reserve(I->getNumOperands()); 2215 ArgVTs.reserve(I->getNumOperands()); 2216 ArgFlags.reserve(I->getNumOperands()); 2217 for (unsigned i = 0; i < I->getNumOperands(); ++i) { 2218 Value *Op = I->getOperand(i); 2219 unsigned Arg = getRegForValue(Op); 2220 if (Arg == 0) return false; 2221 2222 Type *ArgTy = Op->getType(); 2223 MVT ArgVT; 2224 if (!isTypeLegal(ArgTy, ArgVT)) return false; 2225 2226 ISD::ArgFlagsTy Flags; 2227 unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); 2228 Flags.setOrigAlign(OriginalAlignment); 2229 2230 Args.push_back(Op); 2231 ArgRegs.push_back(Arg); 2232 ArgVTs.push_back(ArgVT); 2233 ArgFlags.push_back(Flags); 2234 } 2235 2236 // Handle the arguments now that we've gotten them. 2237 SmallVector<unsigned, 4> RegArgs; 2238 unsigned NumBytes; 2239 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2240 RegArgs, CC, NumBytes, false)) 2241 return false; 2242 2243 unsigned CalleeReg = 0; 2244 if (Subtarget->genLongCalls()) { 2245 CalleeReg = getLibcallReg(TLI.getLibcallName(Call)); 2246 if (CalleeReg == 0) return false; 2247 } 2248 2249 // Issue the call. 2250 unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls()); 2251 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2252 DbgLoc, TII.get(CallOpc)); 2253 // BL / BLX don't take a predicate, but tBL / tBLX do. 2254 if (isThumb2) 2255 AddDefaultPred(MIB); 2256 if (Subtarget->genLongCalls()) 2257 MIB.addReg(CalleeReg); 2258 else 2259 MIB.addExternalSymbol(TLI.getLibcallName(Call)); 2260 2261 // Add implicit physical register uses to the call. 
2262 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2263 MIB.addReg(RegArgs[i], RegState::Implicit); 2264 2265 // Add a register mask with the call-preserved registers. 2266 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2267 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC)); 2268 2269 // Finish off the call including any return values. 2270 SmallVector<unsigned, 4> UsedRegs; 2271 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false; 2272 2273 // Set all unused physreg defs as dead. 2274 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2275 2276 return true; 2277 } 2278 2279 bool ARMFastISel::SelectCall(const Instruction *I, 2280 const char *IntrMemName = nullptr) { 2281 const CallInst *CI = cast<CallInst>(I); 2282 const Value *Callee = CI->getCalledValue(); 2283 2284 // Can't handle inline asm. 2285 if (isa<InlineAsm>(Callee)) return false; 2286 2287 // Allow SelectionDAG isel to handle tail calls. 2288 if (CI->isTailCall()) return false; 2289 2290 // Check the calling convention. 2291 ImmutableCallSite CS(CI); 2292 CallingConv::ID CC = CS.getCallingConv(); 2293 2294 // TODO: Avoid some calling conventions? 2295 2296 FunctionType *FTy = CS.getFunctionType(); 2297 bool isVarArg = FTy->isVarArg(); 2298 2299 // Handle *simple* calls for now. 2300 Type *RetTy = I->getType(); 2301 MVT RetVT; 2302 if (RetTy->isVoidTy()) 2303 RetVT = MVT::isVoid; 2304 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 && 2305 RetVT != MVT::i8 && RetVT != MVT::i1) 2306 return false; 2307 2308 // Can't handle non-double multi-reg retvals. 2309 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 && 2310 RetVT != MVT::i16 && RetVT != MVT::i32) { 2311 SmallVector<CCValAssign, 16> RVLocs; 2312 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context); 2313 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2314 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2315 return false; 2316 } 2317 2318 // Set up the argument vectors. 2319 SmallVector<Value*, 8> Args; 2320 SmallVector<unsigned, 8> ArgRegs; 2321 SmallVector<MVT, 8> ArgVTs; 2322 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2323 unsigned arg_size = CS.arg_size(); 2324 Args.reserve(arg_size); 2325 ArgRegs.reserve(arg_size); 2326 ArgVTs.reserve(arg_size); 2327 ArgFlags.reserve(arg_size); 2328 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 2329 i != e; ++i) { 2330 // If we're lowering a memory intrinsic instead of a regular call, skip the 2331 // last two arguments, which shouldn't be passed to the underlying function. 2332 if (IntrMemName && e-i <= 2) 2333 break; 2334 2335 ISD::ArgFlagsTy Flags; 2336 unsigned AttrInd = i - CS.arg_begin() + 1; 2337 if (CS.paramHasAttr(AttrInd, Attribute::SExt)) 2338 Flags.setSExt(); 2339 if (CS.paramHasAttr(AttrInd, Attribute::ZExt)) 2340 Flags.setZExt(); 2341 2342 // FIXME: Only handle *easy* calls for now. 
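    // Arguments carrying ABI-affecting attributes (inreg, sret, byval, nest,
    // or the Swift self/error registers) need lowering this code doesn't
    // implement, so give up and let SelectionDAG handle the call.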
2343 if (CS.paramHasAttr(AttrInd, Attribute::InReg) || 2344 CS.paramHasAttr(AttrInd, Attribute::StructRet) || 2345 CS.paramHasAttr(AttrInd, Attribute::SwiftSelf) || 2346 CS.paramHasAttr(AttrInd, Attribute::SwiftError) || 2347 CS.paramHasAttr(AttrInd, Attribute::Nest) || 2348 CS.paramHasAttr(AttrInd, Attribute::ByVal)) 2349 return false; 2350 2351 Type *ArgTy = (*i)->getType(); 2352 MVT ArgVT; 2353 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 && 2354 ArgVT != MVT::i1) 2355 return false; 2356 2357 unsigned Arg = getRegForValue(*i); 2358 if (Arg == 0) 2359 return false; 2360 2361 unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); 2362 Flags.setOrigAlign(OriginalAlignment); 2363 2364 Args.push_back(*i); 2365 ArgRegs.push_back(Arg); 2366 ArgVTs.push_back(ArgVT); 2367 ArgFlags.push_back(Flags); 2368 } 2369 2370 // Handle the arguments now that we've gotten them. 2371 SmallVector<unsigned, 4> RegArgs; 2372 unsigned NumBytes; 2373 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2374 RegArgs, CC, NumBytes, isVarArg)) 2375 return false; 2376 2377 bool UseReg = false; 2378 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee); 2379 if (!GV || Subtarget->genLongCalls()) UseReg = true; 2380 2381 unsigned CalleeReg = 0; 2382 if (UseReg) { 2383 if (IntrMemName) 2384 CalleeReg = getLibcallReg(IntrMemName); 2385 else 2386 CalleeReg = getRegForValue(Callee); 2387 2388 if (CalleeReg == 0) return false; 2389 } 2390 2391 // Issue the call. 2392 unsigned CallOpc = ARMSelectCallOp(UseReg); 2393 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2394 DbgLoc, TII.get(CallOpc)); 2395 2396 // ARM calls don't take a predicate, but tBL / tBLX do. 2397 if(isThumb2) 2398 AddDefaultPred(MIB); 2399 if (UseReg) 2400 MIB.addReg(CalleeReg); 2401 else if (!IntrMemName) 2402 MIB.addGlobalAddress(GV, 0, 0); 2403 else 2404 MIB.addExternalSymbol(IntrMemName, 0); 2405 2406 // Add implicit physical register uses to the call. 2407 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2408 MIB.addReg(RegArgs[i], RegState::Implicit); 2409 2410 // Add a register mask with the call-preserved registers. 2411 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2412 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC)); 2413 2414 // Finish off the call including any return values. 2415 SmallVector<unsigned, 4> UsedRegs; 2416 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg)) 2417 return false; 2418 2419 // Set all unused physreg defs as dead. 2420 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2421 2422 return true; 2423 } 2424 2425 bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) { 2426 return Len <= 16; 2427 } 2428 2429 bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, 2430 uint64_t Len, unsigned Alignment) { 2431 // Make sure we don't bloat code by inlining very large memcpy's. 2432 if (!ARMIsMemCpySmall(Len)) 2433 return false; 2434 2435 while (Len) { 2436 MVT VT; 2437 if (!Alignment || Alignment >= 4) { 2438 if (Len >= 4) 2439 VT = MVT::i32; 2440 else if (Len >= 2) 2441 VT = MVT::i16; 2442 else { 2443 assert (Len == 1 && "Expected a length of 1!"); 2444 VT = MVT::i8; 2445 } 2446 } else { 2447 // Bound based on alignment. 
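      // With alignment below 4, restrict the copy units to halfwords or
      // bytes so no unaligned accesses are emitted.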
2448 if (Len >= 2 && Alignment == 2) 2449 VT = MVT::i16; 2450 else { 2451 VT = MVT::i8; 2452 } 2453 } 2454 2455 bool RV; 2456 unsigned ResultReg; 2457 RV = ARMEmitLoad(VT, ResultReg, Src); 2458 assert (RV == true && "Should be able to handle this load."); 2459 RV = ARMEmitStore(VT, ResultReg, Dest); 2460 assert (RV == true && "Should be able to handle this store."); 2461 (void)RV; 2462 2463 unsigned Size = VT.getSizeInBits()/8; 2464 Len -= Size; 2465 Dest.Offset += Size; 2466 Src.Offset += Size; 2467 } 2468 2469 return true; 2470 } 2471 2472 bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { 2473 // FIXME: Handle more intrinsics. 2474 switch (I.getIntrinsicID()) { 2475 default: return false; 2476 case Intrinsic::frameaddress: { 2477 MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo(); 2478 MFI->setFrameAddressIsTaken(true); 2479 2480 unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12; 2481 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass 2482 : &ARM::GPRRegClass; 2483 2484 const ARMBaseRegisterInfo *RegInfo = 2485 static_cast<const ARMBaseRegisterInfo *>(Subtarget->getRegisterInfo()); 2486 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF)); 2487 unsigned SrcReg = FramePtr; 2488 2489 // Recursively load frame address 2490 // ldr r0 [fp] 2491 // ldr r0 [r0] 2492 // ldr r0 [r0] 2493 // ... 2494 unsigned DestReg; 2495 unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue(); 2496 while (Depth--) { 2497 DestReg = createResultReg(RC); 2498 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2499 TII.get(LdrOpc), DestReg) 2500 .addReg(SrcReg).addImm(0)); 2501 SrcReg = DestReg; 2502 } 2503 updateValueMap(&I, SrcReg); 2504 return true; 2505 } 2506 case Intrinsic::memcpy: 2507 case Intrinsic::memmove: { 2508 const MemTransferInst &MTI = cast<MemTransferInst>(I); 2509 // Don't handle volatile. 2510 if (MTI.isVolatile()) 2511 return false; 2512 2513 // Disable inlining for memmove before calls to ComputeAddress. Otherwise, 2514 // we would emit dead code because we don't currently handle memmoves. 2515 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy); 2516 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) { 2517 // Small memcpy's are common enough that we want to do them without a call 2518 // if possible. 2519 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue(); 2520 if (ARMIsMemCpySmall(Len)) { 2521 Address Dest, Src; 2522 if (!ARMComputeAddress(MTI.getRawDest(), Dest) || 2523 !ARMComputeAddress(MTI.getRawSource(), Src)) 2524 return false; 2525 unsigned Alignment = MTI.getAlignment(); 2526 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment)) 2527 return true; 2528 } 2529 } 2530 2531 if (!MTI.getLength()->getType()->isIntegerTy(32)) 2532 return false; 2533 2534 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) 2535 return false; 2536 2537 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove"; 2538 return SelectCall(&I, IntrMemName); 2539 } 2540 case Intrinsic::memset: { 2541 const MemSetInst &MSI = cast<MemSetInst>(I); 2542 // Don't handle volatile. 2543 if (MSI.isVolatile()) 2544 return false; 2545 2546 if (!MSI.getLength()->getType()->isIntegerTy(32)) 2547 return false; 2548 2549 if (MSI.getDestAddressSpace() > 255) 2550 return false; 2551 2552 return SelectCall(&I, "memset"); 2553 } 2554 case Intrinsic::trap: { 2555 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get( 2556 Subtarget->useNaClTrap() ? 
ARM::TRAPNaCl : ARM::TRAP)); 2557 return true; 2558 } 2559 } 2560 } 2561 2562 bool ARMFastISel::SelectTrunc(const Instruction *I) { 2563 // The high bits for a type smaller than the register size are assumed to be 2564 // undefined. 2565 Value *Op = I->getOperand(0); 2566 2567 EVT SrcVT, DestVT; 2568 SrcVT = TLI.getValueType(DL, Op->getType(), true); 2569 DestVT = TLI.getValueType(DL, I->getType(), true); 2570 2571 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 2572 return false; 2573 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 2574 return false; 2575 2576 unsigned SrcReg = getRegForValue(Op); 2577 if (!SrcReg) return false; 2578 2579 // Because the high bits are undefined, a truncate doesn't generate 2580 // any code. 2581 updateValueMap(I, SrcReg); 2582 return true; 2583 } 2584 2585 unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, 2586 bool isZExt) { 2587 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8) 2588 return 0; 2589 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1) 2590 return 0; 2591 2592 // Table of which combinations can be emitted as a single instruction, 2593 // and which will require two. 2594 static const uint8_t isSingleInstrTbl[3][2][2][2] = { 2595 // ARM Thumb 2596 // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops 2597 // ext: s z s z s z s z 2598 /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } }, 2599 /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }, 2600 /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } } 2601 }; 2602 2603 // Target registers for: 2604 // - For ARM can never be PC. 2605 // - For 16-bit Thumb are restricted to lower 8 registers. 2606 // - For 32-bit Thumb are restricted to non-SP and non-PC. 2607 static const TargetRegisterClass *RCTbl[2][2] = { 2608 // Instructions: Two Single 2609 /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass }, 2610 /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass } 2611 }; 2612 2613 // Table governing the instruction(s) to be emitted. 2614 static const struct InstructionTable { 2615 uint32_t Opc : 16; 2616 uint32_t hasS : 1; // Some instructions have an S bit, always set it to 0. 2617 uint32_t Shift : 7; // For shift operand addressing mode, used by MOVsi. 2618 uint32_t Imm : 8; // All instructions have either a shift or a mask. 2619 } IT[2][2][3][2] = { 2620 { // Two instructions (first is left shift, second is in this table). 2621 { // ARM Opc S Shift Imm 2622 /* 1 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 31 }, 2623 /* 1 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 31 } }, 2624 /* 8 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 24 }, 2625 /* 8 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 24 } }, 2626 /* 16 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 16 }, 2627 /* 16 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 16 } } 2628 }, 2629 { // Thumb Opc S Shift Imm 2630 /* 1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 31 }, 2631 /* 1 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } }, 2632 /* 8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 24 }, 2633 /* 8 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } }, 2634 /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 16 }, 2635 /* 16 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } } 2636 } 2637 }, 2638 { // Single instruction. 
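    // These single-instruction forms produce the extended value directly;
    // the shift column is unused (no_shift) and Imm holds the AND mask for
    // the zero-extend cases.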
2639 { // ARM Opc S Shift Imm 2640 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, 2641 /* 1 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 1 } }, 2642 /* 8 bit sext */ { { ARM::SXTB , 0, ARM_AM::no_shift, 0 }, 2643 /* 8 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 255 } }, 2644 /* 16 bit sext */ { { ARM::SXTH , 0, ARM_AM::no_shift, 0 }, 2645 /* 16 bit zext */ { ARM::UXTH , 0, ARM_AM::no_shift, 0 } } 2646 }, 2647 { // Thumb Opc S Shift Imm 2648 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 }, 2649 /* 1 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 1 } }, 2650 /* 8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift, 0 }, 2651 /* 8 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } }, 2652 /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift, 0 }, 2653 /* 16 bit zext */ { ARM::t2UXTH , 0, ARM_AM::no_shift, 0 } } 2654 } 2655 } 2656 }; 2657 2658 unsigned SrcBits = SrcVT.getSizeInBits(); 2659 unsigned DestBits = DestVT.getSizeInBits(); 2660 (void) DestBits; 2661 assert((SrcBits < DestBits) && "can only extend to larger types"); 2662 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) && 2663 "other sizes unimplemented"); 2664 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) && 2665 "other sizes unimplemented"); 2666 2667 bool hasV6Ops = Subtarget->hasV6Ops(); 2668 unsigned Bitness = SrcBits / 8; // {1,8,16}=>{0,1,2} 2669 assert((Bitness < 3) && "sanity-check table bounds"); 2670 2671 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt]; 2672 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr]; 2673 const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt]; 2674 unsigned Opc = ITP->Opc; 2675 assert(ARM::KILL != Opc && "Invalid table entry"); 2676 unsigned hasS = ITP->hasS; 2677 ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift; 2678 assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) && 2679 "only MOVsi has shift operand addressing mode"); 2680 unsigned Imm = ITP->Imm; 2681 2682 // 16-bit Thumb instructions always set CPSR (unless they're in an IT block). 2683 bool setsCPSR = &ARM::tGPRRegClass == RC; 2684 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi; 2685 unsigned ResultReg; 2686 // MOVsi encodes shift and immediate in shift operand addressing mode. 2687 // The following condition has the same value when emitting two 2688 // instruction sequences: both are shifts. 2689 bool ImmIsSO = (Shift != ARM_AM::no_shift); 2690 2691 // Either one or two instructions are emitted. 2692 // They're always of the form: 2693 // dst = in OP imm 2694 // CPSR is set only by 16-bit Thumb instructions. 2695 // Predicate, if any, is AL. 2696 // S bit, if available, is always 0. 2697 // When two are emitted the first's result will feed as the second's input, 2698 // that value is then dead. 2699 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2; 2700 for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) { 2701 ResultReg = createResultReg(RC); 2702 bool isLsl = (0 == Instr) && !isSingleInstr; 2703 unsigned Opcode = isLsl ? LSLOpc : Opc; 2704 ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift; 2705 unsigned ImmEnc = ImmIsSO ? 
ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm; 2706 bool isKill = 1 == Instr; 2707 MachineInstrBuilder MIB = BuildMI( 2708 *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg); 2709 if (setsCPSR) 2710 MIB.addReg(ARM::CPSR, RegState::Define); 2711 SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR); 2712 AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(ImmEnc)); 2713 if (hasS) 2714 AddDefaultCC(MIB); 2715 // Second instruction consumes the first's result. 2716 SrcReg = ResultReg; 2717 } 2718 2719 return ResultReg; 2720 } 2721 2722 bool ARMFastISel::SelectIntExt(const Instruction *I) { 2723 // On ARM, in general, integer casts don't involve legal types; this code 2724 // handles promotable integers. 2725 Type *DestTy = I->getType(); 2726 Value *Src = I->getOperand(0); 2727 Type *SrcTy = Src->getType(); 2728 2729 bool isZExt = isa<ZExtInst>(I); 2730 unsigned SrcReg = getRegForValue(Src); 2731 if (!SrcReg) return false; 2732 2733 EVT SrcEVT, DestEVT; 2734 SrcEVT = TLI.getValueType(DL, SrcTy, true); 2735 DestEVT = TLI.getValueType(DL, DestTy, true); 2736 if (!SrcEVT.isSimple()) return false; 2737 if (!DestEVT.isSimple()) return false; 2738 2739 MVT SrcVT = SrcEVT.getSimpleVT(); 2740 MVT DestVT = DestEVT.getSimpleVT(); 2741 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt); 2742 if (ResultReg == 0) return false; 2743 updateValueMap(I, ResultReg); 2744 return true; 2745 } 2746 2747 bool ARMFastISel::SelectShift(const Instruction *I, 2748 ARM_AM::ShiftOpc ShiftTy) { 2749 // We handle thumb2 mode by target independent selector 2750 // or SelectionDAG ISel. 2751 if (isThumb2) 2752 return false; 2753 2754 // Only handle i32 now. 2755 EVT DestVT = TLI.getValueType(DL, I->getType(), true); 2756 if (DestVT != MVT::i32) 2757 return false; 2758 2759 unsigned Opc = ARM::MOVsr; 2760 unsigned ShiftImm; 2761 Value *Src2Value = I->getOperand(1); 2762 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) { 2763 ShiftImm = CI->getZExtValue(); 2764 2765 // Fall back to selection DAG isel if the shift amount 2766 // is zero or greater than the width of the value type. 2767 if (ShiftImm == 0 || ShiftImm >=32) 2768 return false; 2769 2770 Opc = ARM::MOVsi; 2771 } 2772 2773 Value *Src1Value = I->getOperand(0); 2774 unsigned Reg1 = getRegForValue(Src1Value); 2775 if (Reg1 == 0) return false; 2776 2777 unsigned Reg2 = 0; 2778 if (Opc == ARM::MOVsr) { 2779 Reg2 = getRegForValue(Src2Value); 2780 if (Reg2 == 0) return false; 2781 } 2782 2783 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass); 2784 if(ResultReg == 0) return false; 2785 2786 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2787 TII.get(Opc), ResultReg) 2788 .addReg(Reg1); 2789 2790 if (Opc == ARM::MOVsi) 2791 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm)); 2792 else if (Opc == ARM::MOVsr) { 2793 MIB.addReg(Reg2); 2794 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0)); 2795 } 2796 2797 AddOptionalDefs(MIB); 2798 updateValueMap(I, ResultReg); 2799 return true; 2800 } 2801 2802 // TODO: SoftFP support. 
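// Main FastISel entry point: dispatch on the IR opcode to the matching
// Select* routine. Returning false hands the instruction back to the
// target-independent selector (and ultimately SelectionDAG).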
2803 bool ARMFastISel::fastSelectInstruction(const Instruction *I) { 2804 2805 switch (I->getOpcode()) { 2806 case Instruction::Load: 2807 return SelectLoad(I); 2808 case Instruction::Store: 2809 return SelectStore(I); 2810 case Instruction::Br: 2811 return SelectBranch(I); 2812 case Instruction::IndirectBr: 2813 return SelectIndirectBr(I); 2814 case Instruction::ICmp: 2815 case Instruction::FCmp: 2816 return SelectCmp(I); 2817 case Instruction::FPExt: 2818 return SelectFPExt(I); 2819 case Instruction::FPTrunc: 2820 return SelectFPTrunc(I); 2821 case Instruction::SIToFP: 2822 return SelectIToFP(I, /*isSigned*/ true); 2823 case Instruction::UIToFP: 2824 return SelectIToFP(I, /*isSigned*/ false); 2825 case Instruction::FPToSI: 2826 return SelectFPToI(I, /*isSigned*/ true); 2827 case Instruction::FPToUI: 2828 return SelectFPToI(I, /*isSigned*/ false); 2829 case Instruction::Add: 2830 return SelectBinaryIntOp(I, ISD::ADD); 2831 case Instruction::Or: 2832 return SelectBinaryIntOp(I, ISD::OR); 2833 case Instruction::Sub: 2834 return SelectBinaryIntOp(I, ISD::SUB); 2835 case Instruction::FAdd: 2836 return SelectBinaryFPOp(I, ISD::FADD); 2837 case Instruction::FSub: 2838 return SelectBinaryFPOp(I, ISD::FSUB); 2839 case Instruction::FMul: 2840 return SelectBinaryFPOp(I, ISD::FMUL); 2841 case Instruction::SDiv: 2842 return SelectDiv(I, /*isSigned*/ true); 2843 case Instruction::UDiv: 2844 return SelectDiv(I, /*isSigned*/ false); 2845 case Instruction::SRem: 2846 return SelectRem(I, /*isSigned*/ true); 2847 case Instruction::URem: 2848 return SelectRem(I, /*isSigned*/ false); 2849 case Instruction::Call: 2850 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2851 return SelectIntrinsicCall(*II); 2852 return SelectCall(I); 2853 case Instruction::Select: 2854 return SelectSelect(I); 2855 case Instruction::Ret: 2856 return SelectRet(I); 2857 case Instruction::Trunc: 2858 return SelectTrunc(I); 2859 case Instruction::ZExt: 2860 case Instruction::SExt: 2861 return SelectIntExt(I); 2862 case Instruction::Shl: 2863 return SelectShift(I, ARM_AM::lsl); 2864 case Instruction::LShr: 2865 return SelectShift(I, ARM_AM::lsr); 2866 case Instruction::AShr: 2867 return SelectShift(I, ARM_AM::asr); 2868 default: break; 2869 } 2870 return false; 2871 } 2872 2873 namespace { 2874 // This table describes sign- and zero-extend instructions which can be 2875 // folded into a preceding load. All of these extends have an immediate 2876 // (sometimes a mask and sometimes a shift) that's applied after 2877 // extension. 2878 const struct FoldableLoadExtendsStruct { 2879 uint16_t Opc[2]; // ARM, Thumb. 2880 uint8_t ExpectedImm; 2881 uint8_t isZExt : 1; 2882 uint8_t ExpectedVT : 7; 2883 } FoldableLoadExtends[] = { 2884 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 }, 2885 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 }, 2886 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 }, 2887 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 }, 2888 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 } 2889 }; 2890 } 2891 2892 /// \brief The specified machine instr operand is a vreg, and that 2893 /// vreg is being provided by the specified load instruction. If possible, 2894 /// try to fold the load as an operand to the instruction, returning true if 2895 /// successful. 2896 bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, 2897 const LoadInst *LI) { 2898 // Verify we have a legal type before going any further. 
2899 MVT VT; 2900 if (!isLoadTypeLegal(LI->getType(), VT)) 2901 return false; 2902 2903 // Combine load followed by zero- or sign-extend. 2904 // ldrb r1, [r0] ldrb r1, [r0] 2905 // uxtb r2, r1 => 2906 // mov r3, r2 mov r3, r1 2907 if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm()) 2908 return false; 2909 const uint64_t Imm = MI->getOperand(2).getImm(); 2910 2911 bool Found = false; 2912 bool isZExt; 2913 for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends); 2914 i != e; ++i) { 2915 if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() && 2916 (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm && 2917 MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) { 2918 Found = true; 2919 isZExt = FoldableLoadExtends[i].isZExt; 2920 } 2921 } 2922 if (!Found) return false; 2923 2924 // See if we can handle this address. 2925 Address Addr; 2926 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false; 2927 2928 unsigned ResultReg = MI->getOperand(0).getReg(); 2929 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false)) 2930 return false; 2931 MI->eraseFromParent(); 2932 return true; 2933 } 2934 2935 unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, 2936 unsigned Align, MVT VT) { 2937 bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV); 2938 2939 LLVMContext *Context = &MF->getFunction()->getContext(); 2940 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2941 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 2942 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create( 2943 GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj, 2944 UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier, 2945 /*AddCurrentAddress=*/UseGOT_PREL); 2946 2947 unsigned ConstAlign = 2948 MF->getDataLayout().getPrefTypeAlignment(Type::getInt32PtrTy(*Context)); 2949 unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign); 2950 2951 unsigned TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass); 2952 unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp; 2953 MachineInstrBuilder MIB = 2954 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg) 2955 .addConstantPoolIndex(Idx); 2956 if (Opc == ARM::LDRcp) 2957 MIB.addImm(0); 2958 AddDefaultPred(MIB); 2959 2960 // Fix the address by adding pc. 2961 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 2962 Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? 
ARM::PICLDR 2963 : ARM::PICADD; 2964 DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0); 2965 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg) 2966 .addReg(TempReg) 2967 .addImm(ARMPCLabelIndex); 2968 if (!Subtarget->isThumb()) 2969 AddDefaultPred(MIB); 2970 2971 if (UseGOT_PREL && Subtarget->isThumb()) { 2972 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 2973 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2974 TII.get(ARM::t2LDRi12), NewDestReg) 2975 .addReg(DestReg) 2976 .addImm(0); 2977 DestReg = NewDestReg; 2978 AddOptionalDefs(MIB); 2979 } 2980 return DestReg; 2981 } 2982 2983 bool ARMFastISel::fastLowerArguments() { 2984 if (!FuncInfo.CanLowerReturn) 2985 return false; 2986 2987 const Function *F = FuncInfo.Fn; 2988 if (F->isVarArg()) 2989 return false; 2990 2991 CallingConv::ID CC = F->getCallingConv(); 2992 switch (CC) { 2993 default: 2994 return false; 2995 case CallingConv::Fast: 2996 case CallingConv::C: 2997 case CallingConv::ARM_AAPCS_VFP: 2998 case CallingConv::ARM_AAPCS: 2999 case CallingConv::ARM_APCS: 3000 case CallingConv::Swift: 3001 break; 3002 } 3003 3004 // Only handle simple cases. i.e. Up to 4 i8/i16/i32 scalar arguments 3005 // which are passed in r0 - r3. 3006 unsigned Idx = 1; 3007 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 3008 I != E; ++I, ++Idx) { 3009 if (Idx > 4) 3010 return false; 3011 3012 if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) || 3013 F->getAttributes().hasAttribute(Idx, Attribute::StructRet) || 3014 F->getAttributes().hasAttribute(Idx, Attribute::SwiftSelf) || 3015 F->getAttributes().hasAttribute(Idx, Attribute::SwiftError) || 3016 F->getAttributes().hasAttribute(Idx, Attribute::ByVal)) 3017 return false; 3018 3019 Type *ArgTy = I->getType(); 3020 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) 3021 return false; 3022 3023 EVT ArgVT = TLI.getValueType(DL, ArgTy); 3024 if (!ArgVT.isSimple()) return false; 3025 switch (ArgVT.getSimpleVT().SimpleTy) { 3026 case MVT::i8: 3027 case MVT::i16: 3028 case MVT::i32: 3029 break; 3030 default: 3031 return false; 3032 } 3033 } 3034 3035 3036 static const MCPhysReg GPRArgRegs[] = { 3037 ARM::R0, ARM::R1, ARM::R2, ARM::R3 3038 }; 3039 3040 const TargetRegisterClass *RC = &ARM::rGPRRegClass; 3041 Idx = 0; 3042 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 3043 I != E; ++I, ++Idx) { 3044 unsigned SrcReg = GPRArgRegs[Idx]; 3045 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC); 3046 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy. 3047 // Without this, EmitLiveInCopies may eliminate the livein if its only 3048 // use is a bitcast (which isn't turned into an instruction). 3049 unsigned ResultReg = createResultReg(RC); 3050 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 3051 TII.get(TargetOpcode::COPY), 3052 ResultReg).addReg(DstReg, getKillRegState(true)); 3053 updateValueMap(&*I, ResultReg); 3054 } 3055 3056 return true; 3057 } 3058 3059 namespace llvm { 3060 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo, 3061 const TargetLibraryInfo *libInfo) { 3062 if (funcInfo.MF->getSubtarget<ARMSubtarget>().useFastISel()) 3063 return new ARMFastISel(funcInfo, libInfo); 3064 3065 return nullptr; 3066 } 3067 } 3068