//===- ARMInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "arm-isel"

using namespace llvm;

namespace {

// Predicate bitset declaration generated by TableGen; must be visible before
// the class that embeds the GET_GLOBALISEL_PREDICATES_DECL members below.
#define GET_GLOBALISEL_PREDICATE_BITSET
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

/// Hand-written GlobalISel instruction selector for ARM. The TableGen'erated
/// patterns are tried first (via selectImpl()); everything the generated code
/// cannot handle falls through to the manual lowering in select().
class ARMInstructionSelector : public InstructionSelector {
public:
  ARMInstructionSelector(const ARMBaseTargetMachine &TM, const ARMSubtarget &STI,
                         const ARMRegisterBankInfo &RBI);

  /// Main entry point: mutate/replace generic MI \p I with target instructions.
  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// TableGen'erated pattern matcher (body comes from ARMGenGlobalISel.inc).
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  struct CmpConstants;
  struct InsertInfo;

  /// Lower a G_ICMP/G_FCMP described by \p Helper into one or two compare +
  /// conditional-move sequences. Erases the original instruction on success.
  bool selectCmp(CmpConstants Helper, MachineInstrBuilder &MIB,
                 MachineRegisterInfo &MRI) const;

  // Helper for inserting a comparison sequence that sets \p ResReg to either 1
  // if \p LHSReg and \p RHSReg are in the relationship defined by \p Cond, or
  // \p PrevRes otherwise. In essence, it computes PrevRes OR (LHS Cond RHS).
  bool insertComparison(CmpConstants Helper, InsertInfo I, unsigned ResReg,
                        ARMCC::CondCodes Cond, unsigned LHSReg, unsigned RHSReg,
                        unsigned PrevRes) const;

  // Set \p DestReg to \p Constant.
  void putConstant(InsertInfo I, unsigned DestReg, unsigned Constant) const;

  /// Lower G_GLOBAL_VALUE for the supported combinations of PIC/ROPI/RWPI and
  /// object format (ELF, MachO).
  bool selectGlobal(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  /// Lower G_SELECT as a compare-against-zero followed by a predicated move.
  bool selectSelect(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  /// Lower a shift (G_SHL/G_LSHR/G_ASHR) to MOVsr with shift kind \p ShiftOpc.
  bool selectShift(unsigned ShiftOpc, MachineInstrBuilder &MIB) const;

  // Check if the types match and both operands have the expected size and
  // register bank.
  bool validOpRegPair(MachineRegisterInfo &MRI, unsigned LHS, unsigned RHS,
                      unsigned ExpectedSize, unsigned ExpectedRegBankID) const;

  // Check if the register has the expected size and register bank.
  bool validReg(MachineRegisterInfo &MRI, unsigned Reg, unsigned ExpectedSize,
                unsigned ExpectedRegBankID) const;

  const ARMBaseInstrInfo &TII;
  const ARMBaseRegisterInfo &TRI;
  const ARMBaseTargetMachine &TM;
  const ARMRegisterBankInfo &RBI;
  const ARMSubtarget &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};
} // end anonymous namespace

namespace llvm {
/// Factory used by the ARM target to create the selector pass component.
InstructionSelector *
createARMInstructionSelector(const ARMBaseTargetMachine &TM,
                             const ARMSubtarget &STI,
                             const ARMRegisterBankInfo &RBI) {
  return new ARMInstructionSelector(TM, STI, RBI);
}
}

// NOTE(review): presumably referenced by the TableGen'erated code included
// just below (GET_GLOBALISEL_IMPL) — confirm before removing.
const unsigned zero_reg = 0;

#define GET_GLOBALISEL_IMPL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

// The trailing includes splice in the TableGen'erated initializers for the
// predicate and temporary members declared in the class above.
ARMInstructionSelector::ARMInstructionSelector(const ARMBaseTargetMachine &TM,
                                               const ARMSubtarget &STI,
                                               const ARMRegisterBankInfo &RBI)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), TM(TM), RBI(RBI), STI(STI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

/// Pick a concrete register class for \p Reg from its register bank and type
/// size. Only the GPR and FPR banks are supported; FPR values of 32/64 bits
/// map to SPR/DPR respectively, and everything on the GPR bank maps to GPR.
static const TargetRegisterClass *guessRegClass(unsigned Reg,
                                                MachineRegisterInfo &MRI,
                                                const TargetRegisterInfo &TRI,
                                                const RegisterBankInfo &RBI) {
  const RegisterBank *RegBank = RBI.getRegBank(Reg, MRI, TRI);
  assert(RegBank && "Can't get reg bank for virtual register");

  const unsigned Size = MRI.getType(Reg).getSizeInBits();
  assert((RegBank->getID() == ARM::GPRRegBankID ||
          RegBank->getID() == ARM::FPRRegBankID) &&
         "Unsupported reg bank");

  if (RegBank->getID() == ARM::FPRRegBankID) {
    if (Size == 32)
      return &ARM::SPRRegClass;
    else if (Size == 64)
      return &ARM::DPRRegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }

  return &ARM::GPRRegClass;
}

/// Select a COPY by constraining its virtual destination register to a class
/// guessed from its bank/size. Physical destinations are accepted as-is.
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {
  unsigned DstReg = I.getOperand(0).getReg();
  if (TargetRegisterInfo::isPhysicalRegister(DstReg))
    return true;

  const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }
  return true;
}

/// Select G_MERGE_VALUES of two 32-bit GPRs into one 64-bit DPR by rewriting
/// the instruction in place into VMOVDRR. The asserts document the only
/// supported operand shape.
static bool selectMergeValues(MachineInstrBuilder &MIB,
                              const ARMBaseInstrInfo &TII,
                              MachineRegisterInfo &MRI,
                              const TargetRegisterInfo &TRI,
                              const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2() && "Can't select merge without VFP");

  // We only support G_MERGE_VALUES as a way to stick together two scalar GPRs
  // into one DPR.
  unsigned VReg0 = MIB->getOperand(0).getReg();
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  unsigned VReg1 = MIB->getOperand(1).getReg();
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  unsigned VReg2 = MIB->getOperand(2).getReg();
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVDRR));
  MIB.add(predOps(ARMCC::AL));

  return true;
}

/// Select G_UNMERGE_VALUES of one 64-bit DPR into two 32-bit GPRs by rewriting
/// the instruction in place into VMOVRRD (the inverse of selectMergeValues).
static bool selectUnmergeValues(MachineInstrBuilder &MIB,
                                const ARMBaseInstrInfo &TII,
                                MachineRegisterInfo &MRI,
                                const TargetRegisterInfo &TRI,
                                const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2() && "Can't select unmerge without VFP");

  // We only support G_UNMERGE_VALUES as a way to break up one DPR into two
  // GPRs.
  unsigned VReg0 = MIB->getOperand(0).getReg();
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  unsigned VReg1 = MIB->getOperand(1).getReg();
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  unsigned VReg2 = MIB->getOperand(2).getReg();
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVRRD));
  MIB.add(predOps(ARMCC::AL));

  return true;
}

/// Select the opcode for simple extensions (that translate to a single SXT/UXT
/// instruction). Extension operations more complicated than that should not
/// invoke this. Returns the original opcode if it doesn't know how to select a
/// better one.
static unsigned selectSimpleExtOpc(unsigned Opc, unsigned Size) {
  using namespace TargetOpcode;

  if (Size != 8 && Size != 16)
    return Opc;

  if (Opc == G_SEXT)
    return Size == 8 ? ARM::SXTB : ARM::SXTH;

  if (Opc == G_ZEXT)
    return Size == 8 ? ARM::UXTB : ARM::UXTH;

  return Opc;
}

/// Select the opcode for simple loads and stores. For types smaller than 32
/// bits, the value will be zero extended. Returns the original opcode if it
/// doesn't know how to select a better one.
static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned RegBank,
                                      unsigned Size) {
  bool isStore = Opc == TargetOpcode::G_STORE;

  if (RegBank == ARM::GPRRegBankID) {
    switch (Size) {
    // 1-bit values share the byte load/store opcodes (case falls through).
    case 1:
    case 8:
      return isStore ? ARM::STRBi12 : ARM::LDRBi12;
    case 16:
      return isStore ? ARM::STRH : ARM::LDRH;
    case 32:
      return isStore ? ARM::STRi12 : ARM::LDRi12;
    default:
      // Caller detects "unsupported" by comparing against the original opcode.
      return Opc;
    }
  }

  if (RegBank == ARM::FPRRegBankID) {
    switch (Size) {
    case 32:
      return isStore ? ARM::VSTRS : ARM::VLDRS;
    case 64:
      return isStore ? ARM::VSTRD : ARM::VLDRD;
    default:
      return Opc;
    }
  }

  return Opc;
}

// When lowering comparisons, we sometimes need to perform two compares instead
// of just one. Get the condition codes for both comparisons. If only one is
// needed, the second member of the pair is ARMCC::AL.
static std::pair<ARMCC::CondCodes, ARMCC::CondCodes>
getComparePreds(CmpInst::Predicate Pred) {
  std::pair<ARMCC::CondCodes, ARMCC::CondCodes> Preds = {ARMCC::AL, ARMCC::AL};
  switch (Pred) {
  // The two-compare cases: ONE (ordered-and-not-equal) and UEQ
  // (unordered-or-equal) cannot be expressed by a single ARM condition code.
  case CmpInst::FCMP_ONE:
    Preds = {ARMCC::GT, ARMCC::MI};
    break;
  case CmpInst::FCMP_UEQ:
    Preds = {ARMCC::EQ, ARMCC::VS};
    break;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    Preds.first = ARMCC::EQ;
    break;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    Preds.first = ARMCC::GT;
    break;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    Preds.first = ARMCC::GE;
    break;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    Preds.first = ARMCC::HI;
    break;
  case CmpInst::FCMP_OLT:
    Preds.first = ARMCC::MI;
    break;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    Preds.first = ARMCC::LS;
    break;
  case CmpInst::FCMP_ORD:
    Preds.first = ARMCC::VC;
    break;
  case CmpInst::FCMP_UNO:
    Preds.first = ARMCC::VS;
    break;
  case CmpInst::FCMP_UGE:
    Preds.first = ARMCC::PL;
    break;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    Preds.first = ARMCC::LT;
    break;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    Preds.first = ARMCC::LE;
    break;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    Preds.first = ARMCC::NE;
    break;
  case CmpInst::ICMP_UGE:
    Preds.first = ARMCC::HS;
    break;
  case CmpInst::ICMP_ULT:
    Preds.first = ARMCC::LO;
    break;
  default:
    // FCMP_TRUE/FCMP_FALSE (and anything unexpected) are handled by the
    // caller before getting here; the assert below catches misuse.
    break;
  }
  assert(Preds.first != ARMCC::AL && "No comparisons needed?");
  return Preds;
}

/// Bundle of opcodes/constraints that parameterize selectCmp so the same
/// lowering code serves both G_ICMP (CMPrr) and G_FCMP (VCMPS/VCMPD + FMSTAT).
struct ARMInstructionSelector::CmpConstants {
  CmpConstants(unsigned CmpOpcode, unsigned FlagsOpcode, unsigned OpRegBank,
               unsigned OpSize)
      : ComparisonOpcode(CmpOpcode), ReadFlagsOpcode(FlagsOpcode),
        OperandRegBankID(OpRegBank), OperandSize(OpSize) {}

  // The opcode used for performing the comparison.
  const unsigned ComparisonOpcode;

  // The opcode used for reading the flags set by the comparison. May be
  // ARM::INSTRUCTION_LIST_END if we don't need to read the flags.
  const unsigned ReadFlagsOpcode;

  // The assumed register bank ID for the operands.
  const unsigned OperandRegBankID;

  // The assumed size in bits for the operands.
  const unsigned OperandSize;
};

/// Captures where new instructions should be inserted: right after the
/// instruction wrapped by \p MIB, with its debug location.
struct ARMInstructionSelector::InsertInfo {
  InsertInfo(MachineInstrBuilder &MIB)
      : MBB(*MIB->getParent()), InsertBefore(std::next(MIB->getIterator())),
        DbgLoc(MIB->getDebugLoc()) {}

  MachineBasicBlock &MBB;
  const MachineBasicBlock::instr_iterator InsertBefore;
  const DebugLoc &DbgLoc;
};

// Emit a MOVi that materializes \p Constant into \p DestReg at the insertion
// point described by \p I.
void ARMInstructionSelector::putConstant(InsertInfo I, unsigned DestReg,
                                         unsigned Constant) const {
  (void)BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(ARM::MOVi))
      .addDef(DestReg)
      .addImm(Constant)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());
}

// Returns true iff LHS and RHS have identical LLTs and each individually
// passes validReg() for the expected size/bank.
bool ARMInstructionSelector::validOpRegPair(MachineRegisterInfo &MRI,
                                            unsigned LHSReg, unsigned RHSReg,
                                            unsigned ExpectedSize,
                                            unsigned ExpectedRegBankID) const {
  return MRI.getType(LHSReg) == MRI.getType(RHSReg) &&
         validReg(MRI, LHSReg, ExpectedSize, ExpectedRegBankID) &&
         validReg(MRI, RHSReg, ExpectedSize, ExpectedRegBankID);
}

// Returns true iff \p Reg has the expected type size and lives on the
// expected register bank; emits a debug message otherwise.
bool ARMInstructionSelector::validReg(MachineRegisterInfo &MRI, unsigned Reg,
                                      unsigned ExpectedSize,
                                      unsigned ExpectedRegBankID) const {
  if (MRI.getType(Reg).getSizeInBits() != ExpectedSize) {
    DEBUG(dbgs() << "Unexpected size for register");
    return false;
  }

  if (RBI.getRegBank(Reg, MRI, TRI)->getID() != ExpectedRegBankID) {
    DEBUG(dbgs() << "Unexpected register bank for register");
    return false;
  }

  return true;
}

// Lower a G_ICMP/G_FCMP: materialize 0, then chain one or two
// compare+conditional-move sequences (see insertComparison). FCMP_TRUE and
// FCMP_FALSE degenerate to putting a constant 1/0 in the result register.
bool ARMInstructionSelector::selectCmp(CmpConstants Helper,
                                       MachineInstrBuilder &MIB,
                                       MachineRegisterInfo &MRI) const {
  const InsertInfo I(MIB);

  auto ResReg = MIB->getOperand(0).getReg();
  if (!validReg(MRI, ResReg, 1, ARM::GPRRegBankID))
    return false;

  auto Cond =
      static_cast<CmpInst::Predicate>(MIB->getOperand(1).getPredicate());
  if (Cond == CmpInst::FCMP_TRUE || Cond == CmpInst::FCMP_FALSE) {
    putConstant(I, ResReg, Cond == CmpInst::FCMP_TRUE ? 1 : 0);
    MIB->eraseFromParent();
    return true;
  }

  auto LHSReg = MIB->getOperand(2).getReg();
  auto RHSReg = MIB->getOperand(3).getReg();
  if (!validOpRegPair(MRI, LHSReg, RHSReg, Helper.OperandSize,
                      Helper.OperandRegBankID))
    return false;

  auto ARMConds = getComparePreds(Cond);
  auto ZeroReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  putConstant(I, ZeroReg, 0);

  if (ARMConds.second == ARMCC::AL) {
    // Simple case, we only need one comparison and we're done.
    if (!insertComparison(Helper, I, ResReg, ARMConds.first, LHSReg, RHSReg,
                          ZeroReg))
      return false;
  } else {
    // Not so simple, we need two successive comparisons.
    auto IntermediateRes = MRI.createVirtualRegister(&ARM::GPRRegClass);
    if (!insertComparison(Helper, I, IntermediateRes, ARMConds.first, LHSReg,
                          RHSReg, ZeroReg))
      return false;
    if (!insertComparison(Helper, I, ResReg, ARMConds.second, LHSReg, RHSReg,
                          IntermediateRes))
      return false;
  }

  MIB->eraseFromParent();
  return true;
}

// Emit: compare LHS/RHS, optionally transfer the FP flags to CPSR (FMSTAT for
// the VFP case), then MOVCCi that selects 1 (if Cond holds) or PrevRes.
bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I,
                                              unsigned ResReg,
                                              ARMCC::CondCodes Cond,
                                              unsigned LHSReg, unsigned RHSReg,
                                              unsigned PrevRes) const {
  // Perform the comparison.
  auto CmpI =
      BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Helper.ComparisonOpcode))
          .addUse(LHSReg)
          .addUse(RHSReg)
          .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Read the comparison flags (if necessary).
  if (Helper.ReadFlagsOpcode != ARM::INSTRUCTION_LIST_END) {
    auto ReadI = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                         TII.get(Helper.ReadFlagsOpcode))
                     .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*ReadI, TII, TRI, RBI))
      return false;
  }

  // Select either 1 or the previous result based on the value of the flags.
  auto Mov1I = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(ARM::MOVCCi))
                   .addDef(ResReg)
                   .addUse(PrevRes)
                   .addImm(1)
                   .add(predOps(Cond, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  return true;
}

// Lower G_GLOBAL_VALUE. The supported combinations are: PIC (with or without
// MOVT, direct or GOT-indirect), ROPI for read-only globals, RWPI (SB-relative)
// for writable globals, and absolute addressing on ELF/MachO. The instruction
// is rewritten in place via setDesc/operand surgery.
bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  if ((STI.isROPI() || STI.isRWPI()) && !STI.isTargetELF()) {
    DEBUG(dbgs() << "ROPI and RWPI only supported for ELF\n");
    return false;
  }

  auto GV = MIB->getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    DEBUG(dbgs() << "TLS variables not supported yet\n");
    return false;
  }

  auto &MBB = *MIB->getParent();
  auto &MF = *MBB.getParent();

  bool UseMovt = STI.useMovt(MF);

  unsigned Size = TM.getPointerSize();
  unsigned Alignment = 4;

  // Append the operands for an LDRi12 load of \p GV's address from the
  // constant pool (SB-relative entry when IsSBREL).
  auto addOpsForConstantPoolLoad = [&MF, Alignment,
                                    Size](MachineInstrBuilder &MIB,
                                          const GlobalValue *GV, bool IsSBREL) {
    assert(MIB->getOpcode() == ARM::LDRi12 && "Unsupported instruction");
    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        // For SB relative entries we need a target-specific constant pool.
        // Otherwise, just use a regular constant pool entry.
        IsSBREL
            ? ConstPool->getConstantPoolIndex(
                  ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment)
            : ConstPool->getConstantPoolIndex(GV, Alignment);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(
            MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
                                    MachineMemOperand::MOLoad, Size, Alignment))
        .addImm(0)
        .add(predOps(ARMCC::AL));
  };

  if (TM.isPositionIndependent()) {
    bool Indirect = STI.isGVIndirectSymbol(GV);
    // FIXME: Taking advantage of MOVT for ELF is pretty involved, so we don't
    // support it yet. See PR28229.
    unsigned Opc =
        UseMovt && !STI.isTargetELF()
            ? (Indirect ? ARM::MOV_ga_pcrel_ldr : ARM::MOV_ga_pcrel)
            : (Indirect ? ARM::LDRLIT_ga_pcrel_ldr : ARM::LDRLIT_ga_pcrel);
    MIB->setDesc(TII.get(Opc));

    int TargetFlags = ARMII::MO_NO_FLAG;
    if (STI.isTargetDarwin())
      TargetFlags |= ARMII::MO_NONLAZY;
    if (STI.isGVInGOT(GV))
      TargetFlags |= ARMII::MO_GOT;
    MIB->getOperand(1).setTargetFlags(TargetFlags);

    // The *_ldr variants perform a load through the GOT; describe it.
    if (Indirect)
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad,
          TM.getPointerSize(), Alignment));

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  bool isReadOnly = STI.getTargetLowering()->isReadOnly(GV);
  if (STI.isROPI() && isReadOnly) {
    unsigned Opc = UseMovt ? ARM::MOV_ga_pcrel : ARM::LDRLIT_ga_pcrel;
    MIB->setDesc(TII.get(Opc));
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }
  if (STI.isRWPI() && !isReadOnly) {
    // RWPI: compute the SB-relative offset first, then add it to SB below.
    auto Offset = MRI.createVirtualRegister(&ARM::GPRRegClass);
    MachineInstrBuilder OffsetMIB;
    if (UseMovt) {
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(ARM::MOVi32imm), Offset);
      OffsetMIB.addGlobalAddress(GV, /*Offset*/ 0, ARMII::MO_SBREL);
    } else {
      // Load the offset from the constant pool.
      OffsetMIB =
          BuildMI(MBB, *MIB, MIB->getDebugLoc(), TII.get(ARM::LDRi12), Offset);
      addOpsForConstantPoolLoad(OffsetMIB, GV, /*IsSBREL*/ true);
    }
    if (!constrainSelectedInstRegOperands(*OffsetMIB, TII, TRI, RBI))
      return false;

    // Add the offset to the SB register.
    MIB->setDesc(TII.get(ARM::ADDrr));
    MIB->RemoveOperand(1);
    MIB.addReg(ARM::R9) // FIXME: don't hardcode R9
        .addReg(Offset)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  if (STI.isTargetELF()) {
    if (UseMovt) {
      MIB->setDesc(TII.get(ARM::MOVi32imm));
    } else {
      // Load the global's address from the constant pool.
      MIB->setDesc(TII.get(ARM::LDRi12));
      MIB->RemoveOperand(1);
      addOpsForConstantPoolLoad(MIB, GV, /*IsSBREL*/ false);
    }
  } else if (STI.isTargetMachO()) {
    if (UseMovt)
      MIB->setDesc(TII.get(ARM::MOVi32imm));
    else
      MIB->setDesc(TII.get(ARM::LDRLIT_ga_abs));
  } else {
    DEBUG(dbgs() << "Object format not supported yet\n");
    return false;
  }

  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

// Lower G_SELECT as CMPri (condition vs 0) followed by MOVCCr picking
// TrueReg when the condition was non-zero (EQ on the inverted sense: MOVCCr
// copies FalseReg unconditionally via its tied operand and TrueReg under EQ —
// see the operand order below). Only 32-bit GPR values are supported.
bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  auto &MBB = *MIB->getParent();
  auto InsertBefore = std::next(MIB->getIterator());
  auto &DbgLoc = MIB->getDebugLoc();

  // Compare the condition to 0.
  auto CondReg = MIB->getOperand(1).getReg();
  // NOTE(review): validation is assert-only here (vs returning false in
  // selectCmp); in release builds an invalid condition register is not caught.
  assert(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(ARM::CMPri))
                  .addUse(CondReg)
                  .addImm(0)
                  .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Move a value into the result register based on the result of the
  // comparison.
  auto ResReg = MIB->getOperand(0).getReg();
  auto TrueReg = MIB->getOperand(2).getReg();
  auto FalseReg = MIB->getOperand(3).getReg();
  assert(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) &&
         validOpRegPair(MRI, TrueReg, FalseReg, 32, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(ARM::MOVCCr))
                   .addDef(ResReg)
                   .addUse(TrueReg)
                   .addUse(FalseReg)
                   .add(predOps(ARMCC::EQ, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  MIB->eraseFromParent();
  return true;
}

// Rewrite a generic shift in place into MOVsr (register-shifted move) with
// the given ARM_AM shift kind appended as an immediate operand.
bool ARMInstructionSelector::selectShift(unsigned ShiftOpc,
                                         MachineInstrBuilder &MIB) const {
  MIB->setDesc(TII.get(ARM::MOVsr));
  MIB.addImm(ShiftOpc);
  MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

// Entry point. Non-generic instructions are accepted as-is (COPYs get their
// register class constrained); generic ones go through the TableGen'erated
// selectImpl() first, then through the manual lowerings below. Cases that
// `break` fall through to a final operand-constraining step; cases that
// `return` have already finished (or failed) on their own.
bool ARMInstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  auto &MBB = *I.getParent();
  auto &MF = *MBB.getParent();
  auto &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  using namespace TargetOpcode;

  if (selectImpl(I, CoverageInfo))
    return true;

  MachineInstrBuilder MIB{MF, I};
  bool isSExt = false;

  switch (I.getOpcode()) {
  case G_SEXT:
    isSExt = true;
    LLVM_FALLTHROUGH;
  case G_ZEXT: {
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    // FIXME: Smaller destination sizes coming soon!
    if (DstTy.getSizeInBits() != 32) {
      DEBUG(dbgs() << "Unsupported destination size for extension");
      return false;
    }

    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    unsigned SrcSize = SrcTy.getSizeInBits();
    switch (SrcSize) {
    case 1: {
      // ZExt boils down to & 0x1; for SExt we also subtract that from 0
      I.setDesc(TII.get(ARM::ANDri));
      MIB.addImm(1).add(predOps(ARMCC::AL)).add(condCodeOp());

      if (isSExt) {
        unsigned SExtResult = I.getOperand(0).getReg();

        // Use a new virtual register for the result of the AND
        unsigned AndResult = MRI.createVirtualRegister(&ARM::GPRRegClass);
        I.getOperand(0).setReg(AndResult);

        // RSB 0 - (x & 1) turns 0/1 into 0/-1, i.e. the sign extension.
        auto InsertBefore = std::next(I.getIterator());
        auto SubI =
            BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(ARM::RSBri))
                .addDef(SExtResult)
                .addUse(AndResult)
                .addImm(0)
                .add(predOps(ARMCC::AL))
                .add(condCodeOp());
        if (!constrainSelectedInstRegOperands(*SubI, TII, TRI, RBI))
          return false;
      }
      break;
    }
    case 8:
    case 16: {
      unsigned NewOpc = selectSimpleExtOpc(I.getOpcode(), SrcSize);
      if (NewOpc == I.getOpcode())
        return false;
      I.setDesc(TII.get(NewOpc));
      // SXT/UXT take a rotate amount; 0 means no rotation.
      MIB.addImm(0).add(predOps(ARMCC::AL));
      break;
    }
    default:
      DEBUG(dbgs() << "Unsupported source size for extension");
      return false;
    }
    break;
  }
  case G_ANYEXT:
  case G_TRUNC: {
    // The high bits are undefined, so there's nothing special to do, just
    // treat it as a copy.
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() == ARM::FPRRegBankID) {
      // This should only happen in the obscure case where we have put a 64-bit
      // integer into a D register. Get it out of there and keep only the
      // interesting part.
      assert(I.getOpcode() == G_TRUNC && "Unsupported operand for G_ANYEXT");
      assert(DstRegBank.getID() == ARM::GPRRegBankID &&
             "Unsupported combination of register banks");
      assert(MRI.getType(SrcReg).getSizeInBits() == 64 && "Unsupported size");
      assert(MRI.getType(DstReg).getSizeInBits() <= 32 && "Unsupported size");

      unsigned IgnoredBits = MRI.createVirtualRegister(&ARM::GPRRegClass);
      auto InsertBefore = std::next(I.getIterator());
      auto MovI =
          BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(ARM::VMOVRRD))
              .addDef(DstReg)
              .addDef(IgnoredBits)
              .addUse(SrcReg)
              .add(predOps(ARMCC::AL));
      if (!constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI))
        return false;

      MIB->eraseFromParent();
      return true;
    }

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      DEBUG(dbgs() << "G_TRUNC/G_ANYEXT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      DEBUG(dbgs() << "G_TRUNC/G_ANYEXT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_CONSTANT: {
    if (!MRI.getType(I.getOperand(0).getReg()).isPointer()) {
      // Non-pointer constants should be handled by TableGen.
      DEBUG(dbgs() << "Unsupported constant type\n");
      return false;
    }

    // Only the null pointer is supported; normalize CImm(0) to a plain
    // immediate so the MOVi rewrite below works either way.
    auto &Val = I.getOperand(1);
    if (Val.isCImm()) {
      if (!Val.getCImm()->isZero()) {
        DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
      Val.ChangeToImmediate(0);
    } else {
      assert(Val.isImm() && "Unexpected operand for G_CONSTANT");
      if (Val.getImm() != 0) {
        DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
    }

    I.setDesc(TII.get(ARM::MOVi));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    // Pointers and 32-bit integers share GPRs, so these are plain copies
    // when both sides live on the GPR bank.
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      DEBUG(dbgs()
            << "G_INTTOPTR/G_PTRTOINT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      DEBUG(dbgs() << "G_INTTOPTR/G_PTRTOINT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_SELECT:
    return selectSelect(MIB, MRI);
  case G_ICMP: {
    // Integer compare: CMPrr sets CPSR directly, no flag-transfer opcode.
    CmpConstants Helper(ARM::CMPrr, ARM::INSTRUCTION_LIST_END,
                        ARM::GPRRegBankID, 32);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_FCMP: {
    assert(STI.hasVFP2() && "Can't select fcmp without VFP");

    unsigned OpReg = I.getOperand(2).getReg();
    unsigned Size = MRI.getType(OpReg).getSizeInBits();

    if (Size == 64 && STI.isFPOnlySP()) {
      DEBUG(dbgs() << "Subtarget only supports single precision");
      return false;
    }
    if (Size != 32 && Size != 64) {
      DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
      return false;
    }

    // FP compare: VCMPS/VCMPD sets the FP status flags; FMSTAT copies them
    // into CPSR so the predicated moves in selectCmp can use them.
    CmpConstants Helper(Size == 32 ? ARM::VCMPS : ARM::VCMPD, ARM::FMSTAT,
                        ARM::FPRRegBankID, Size);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_LSHR:
    return selectShift(ARM_AM::ShiftOpc::lsr, MIB);
  case G_ASHR:
    return selectShift(ARM_AM::ShiftOpc::asr, MIB);
  case G_SHL: {
    return selectShift(ARM_AM::ShiftOpc::lsl, MIB);
  }
  case G_GEP:
    // Pointer arithmetic on GPRs is just an add.
    I.setDesc(TII.get(ARM::ADDrr));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_FRAME_INDEX:
    // Add 0 to the given frame index and hope it will eventually be folded into
    // the user(s).
    I.setDesc(TII.get(ARM::ADDri));
    MIB.addImm(0).add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_GLOBAL_VALUE:
    return selectGlobal(MIB, MRI);
  case G_STORE:
  case G_LOAD: {
    const auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    unsigned Reg = I.getOperand(0).getReg();
    unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();

    LLT ValTy = MRI.getType(Reg);
    const auto ValSize = ValTy.getSizeInBits();

    assert((ValSize != 64 || STI.hasVFP2()) &&
           "Don't know how to load/store 64-bit value without VFP");

    const auto NewOpc = selectLoadStoreOpCode(I.getOpcode(), RegBank, ValSize);
    if (NewOpc == G_LOAD || NewOpc == G_STORE)
      return false;

    I.setDesc(TII.get(NewOpc));

    if (NewOpc == ARM::LDRH || NewOpc == ARM::STRH)
      // LDRH has a funny addressing mode (there's already a FIXME for it).
      MIB.addReg(0);
    MIB.addImm(0).add(predOps(ARMCC::AL));
    break;
  }
  case G_MERGE_VALUES: {
    if (!selectMergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_UNMERGE_VALUES: {
    if (!selectUnmergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_BRCOND: {
    if (!validReg(MRI, I.getOperand(0).getReg(), 1, ARM::GPRRegBankID)) {
      DEBUG(dbgs() << "Unsupported condition register for G_BRCOND");
      return false;
    }

    // Set the flags.
    auto Test = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ARM::TSTri))
                    .addReg(I.getOperand(0).getReg())
                    .addImm(1)
                    .add(predOps(ARMCC::AL))
    ;
    if (!constrainSelectedInstRegOperands(*Test, TII, TRI, RBI))
      return false;

    // Branch conditionally.
    auto Branch = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ARM::Bcc))
                      .add(I.getOperand(1))
                      .add(predOps(ARMCC::NE, ARM::CPSR));
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;
    I.eraseFromParent();
    return true;
  }
  case G_PHI: {
    I.setDesc(TII.get(PHI));

    unsigned DstReg = I.getOperand(0).getReg();
    const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);
    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      break;
    }

    return true;
  }
  default:
    return false;
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}