1 //===- ARMInstructionSelector.cpp ----------------------------*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements the targeting of the InstructionSelector class for ARM. 10 /// \todo This should be generated by TableGen. 11 //===----------------------------------------------------------------------===// 12 13 #include "ARMRegisterBankInfo.h" 14 #include "ARMSubtarget.h" 15 #include "ARMTargetMachine.h" 16 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h" 17 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h" 18 #include "llvm/CodeGen/MachineConstantPool.h" 19 #include "llvm/CodeGen/MachineRegisterInfo.h" 20 #include "llvm/Support/Debug.h" 21 22 #define DEBUG_TYPE "arm-isel" 23 24 using namespace llvm; 25 26 namespace { 27 28 #define GET_GLOBALISEL_PREDICATE_BITSET 29 #include "ARMGenGlobalISel.inc" 30 #undef GET_GLOBALISEL_PREDICATE_BITSET 31 32 class ARMInstructionSelector : public InstructionSelector { 33 public: 34 ARMInstructionSelector(const ARMBaseTargetMachine &TM, const ARMSubtarget &STI, 35 const ARMRegisterBankInfo &RBI); 36 37 bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override; 38 static const char *getName() { return DEBUG_TYPE; } 39 40 private: 41 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const; 42 43 struct CmpConstants; 44 struct InsertInfo; 45 46 bool selectCmp(CmpConstants Helper, MachineInstrBuilder &MIB, 47 MachineRegisterInfo &MRI) const; 48 49 // Helper for inserting a comparison sequence that sets \p ResReg to either 1 50 // if \p LHSReg and \p RHSReg are in the relationship defined by \p Cond, or 51 // \p PrevRes otherwise. In essence, it computes PrevRes OR (LHS Cond RHS). 
52 bool insertComparison(CmpConstants Helper, InsertInfo I, unsigned ResReg, 53 ARMCC::CondCodes Cond, unsigned LHSReg, unsigned RHSReg, 54 unsigned PrevRes) const; 55 56 // Set \p DestReg to \p Constant. 57 void putConstant(InsertInfo I, unsigned DestReg, unsigned Constant) const; 58 59 bool selectGlobal(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const; 60 bool selectSelect(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const; 61 bool selectShift(unsigned ShiftOpc, MachineInstrBuilder &MIB) const; 62 63 // Check if the types match and both operands have the expected size and 64 // register bank. 65 bool validOpRegPair(MachineRegisterInfo &MRI, unsigned LHS, unsigned RHS, 66 unsigned ExpectedSize, unsigned ExpectedRegBankID) const; 67 68 // Check if the register has the expected size and register bank. 69 bool validReg(MachineRegisterInfo &MRI, unsigned Reg, unsigned ExpectedSize, 70 unsigned ExpectedRegBankID) const; 71 72 const ARMBaseInstrInfo &TII; 73 const ARMBaseRegisterInfo &TRI; 74 const ARMBaseTargetMachine &TM; 75 const ARMRegisterBankInfo &RBI; 76 const ARMSubtarget &STI; 77 78 // Store the opcodes that we might need, so we don't have to check what kind 79 // of subtarget (ARM vs Thumb) we have all the time. 80 struct OpcodeCache { 81 unsigned ZEXT16; 82 unsigned SEXT16; 83 84 unsigned ZEXT8; 85 unsigned SEXT8; 86 87 // Used for implementing ZEXT/SEXT from i1 88 unsigned AND; 89 unsigned RSB; 90 91 unsigned STORE32; 92 unsigned LOAD32; 93 94 unsigned STORE16; 95 unsigned LOAD16; 96 97 unsigned STORE8; 98 unsigned LOAD8; 99 100 unsigned CMPrr; 101 unsigned MOVi; 102 unsigned MOVCCi; 103 104 OpcodeCache(const ARMSubtarget &STI); 105 } const Opcodes; 106 107 // Select the opcode for simple extensions (that translate to a single SXT/UXT 108 // instruction). Extension operations more complicated than that should not 109 // invoke this. Returns the original opcode if it doesn't know how to select a 110 // better one. 
111 unsigned selectSimpleExtOpc(unsigned Opc, unsigned Size) const; 112 113 // Select the opcode for simple loads and stores. Returns the original opcode 114 // if it doesn't know how to select a better one. 115 unsigned selectLoadStoreOpCode(unsigned Opc, unsigned RegBank, 116 unsigned Size) const; 117 118 #define GET_GLOBALISEL_PREDICATES_DECL 119 #include "ARMGenGlobalISel.inc" 120 #undef GET_GLOBALISEL_PREDICATES_DECL 121 122 // We declare the temporaries used by selectImpl() in the class to minimize the 123 // cost of constructing placeholder values. 124 #define GET_GLOBALISEL_TEMPORARIES_DECL 125 #include "ARMGenGlobalISel.inc" 126 #undef GET_GLOBALISEL_TEMPORARIES_DECL 127 }; 128 } // end anonymous namespace 129 130 namespace llvm { 131 InstructionSelector * 132 createARMInstructionSelector(const ARMBaseTargetMachine &TM, 133 const ARMSubtarget &STI, 134 const ARMRegisterBankInfo &RBI) { 135 return new ARMInstructionSelector(TM, STI, RBI); 136 } 137 } 138 139 const unsigned zero_reg = 0; 140 141 #define GET_GLOBALISEL_IMPL 142 #include "ARMGenGlobalISel.inc" 143 #undef GET_GLOBALISEL_IMPL 144 145 ARMInstructionSelector::ARMInstructionSelector(const ARMBaseTargetMachine &TM, 146 const ARMSubtarget &STI, 147 const ARMRegisterBankInfo &RBI) 148 : InstructionSelector(), TII(*STI.getInstrInfo()), 149 TRI(*STI.getRegisterInfo()), TM(TM), RBI(RBI), STI(STI), Opcodes(STI), 150 #define GET_GLOBALISEL_PREDICATES_INIT 151 #include "ARMGenGlobalISel.inc" 152 #undef GET_GLOBALISEL_PREDICATES_INIT 153 #define GET_GLOBALISEL_TEMPORARIES_INIT 154 #include "ARMGenGlobalISel.inc" 155 #undef GET_GLOBALISEL_TEMPORARIES_INIT 156 { 157 } 158 159 static const TargetRegisterClass *guessRegClass(unsigned Reg, 160 MachineRegisterInfo &MRI, 161 const TargetRegisterInfo &TRI, 162 const RegisterBankInfo &RBI) { 163 const RegisterBank *RegBank = RBI.getRegBank(Reg, MRI, TRI); 164 assert(RegBank && "Can't get reg bank for virtual register"); 165 166 const unsigned Size = 
MRI.getType(Reg).getSizeInBits(); 167 assert((RegBank->getID() == ARM::GPRRegBankID || 168 RegBank->getID() == ARM::FPRRegBankID) && 169 "Unsupported reg bank"); 170 171 if (RegBank->getID() == ARM::FPRRegBankID) { 172 if (Size == 32) 173 return &ARM::SPRRegClass; 174 else if (Size == 64) 175 return &ARM::DPRRegClass; 176 else if (Size == 128) 177 return &ARM::QPRRegClass; 178 else 179 llvm_unreachable("Unsupported destination size"); 180 } 181 182 return &ARM::GPRRegClass; 183 } 184 185 static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, 186 MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, 187 const RegisterBankInfo &RBI) { 188 unsigned DstReg = I.getOperand(0).getReg(); 189 if (TargetRegisterInfo::isPhysicalRegister(DstReg)) 190 return true; 191 192 const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI); 193 194 // No need to constrain SrcReg. It will get constrained when 195 // we hit another of its uses or its defs. 196 // Copies do not have constraints. 197 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) { 198 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode()) 199 << " operand\n"); 200 return false; 201 } 202 return true; 203 } 204 205 static bool selectMergeValues(MachineInstrBuilder &MIB, 206 const ARMBaseInstrInfo &TII, 207 MachineRegisterInfo &MRI, 208 const TargetRegisterInfo &TRI, 209 const RegisterBankInfo &RBI) { 210 assert(TII.getSubtarget().hasVFP2() && "Can't select merge without VFP"); 211 212 // We only support G_MERGE_VALUES as a way to stick together two scalar GPRs 213 // into one DPR. 
214 unsigned VReg0 = MIB->getOperand(0).getReg(); 215 (void)VReg0; 216 assert(MRI.getType(VReg0).getSizeInBits() == 64 && 217 RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::FPRRegBankID && 218 "Unsupported operand for G_MERGE_VALUES"); 219 unsigned VReg1 = MIB->getOperand(1).getReg(); 220 (void)VReg1; 221 assert(MRI.getType(VReg1).getSizeInBits() == 32 && 222 RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID && 223 "Unsupported operand for G_MERGE_VALUES"); 224 unsigned VReg2 = MIB->getOperand(2).getReg(); 225 (void)VReg2; 226 assert(MRI.getType(VReg2).getSizeInBits() == 32 && 227 RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::GPRRegBankID && 228 "Unsupported operand for G_MERGE_VALUES"); 229 230 MIB->setDesc(TII.get(ARM::VMOVDRR)); 231 MIB.add(predOps(ARMCC::AL)); 232 233 return true; 234 } 235 236 static bool selectUnmergeValues(MachineInstrBuilder &MIB, 237 const ARMBaseInstrInfo &TII, 238 MachineRegisterInfo &MRI, 239 const TargetRegisterInfo &TRI, 240 const RegisterBankInfo &RBI) { 241 assert(TII.getSubtarget().hasVFP2() && "Can't select unmerge without VFP"); 242 243 // We only support G_UNMERGE_VALUES as a way to break up one DPR into two 244 // GPRs. 
245 unsigned VReg0 = MIB->getOperand(0).getReg(); 246 (void)VReg0; 247 assert(MRI.getType(VReg0).getSizeInBits() == 32 && 248 RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::GPRRegBankID && 249 "Unsupported operand for G_UNMERGE_VALUES"); 250 unsigned VReg1 = MIB->getOperand(1).getReg(); 251 (void)VReg1; 252 assert(MRI.getType(VReg1).getSizeInBits() == 32 && 253 RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID && 254 "Unsupported operand for G_UNMERGE_VALUES"); 255 unsigned VReg2 = MIB->getOperand(2).getReg(); 256 (void)VReg2; 257 assert(MRI.getType(VReg2).getSizeInBits() == 64 && 258 RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::FPRRegBankID && 259 "Unsupported operand for G_UNMERGE_VALUES"); 260 261 MIB->setDesc(TII.get(ARM::VMOVRRD)); 262 MIB.add(predOps(ARMCC::AL)); 263 264 return true; 265 } 266 267 ARMInstructionSelector::OpcodeCache::OpcodeCache(const ARMSubtarget &STI) { 268 bool isThumb = STI.isThumb(); 269 270 using namespace TargetOpcode; 271 272 #define STORE_OPCODE(VAR, OPC) VAR = isThumb ? ARM::t2##OPC : ARM::OPC 273 STORE_OPCODE(SEXT16, SXTH); 274 STORE_OPCODE(ZEXT16, UXTH); 275 276 STORE_OPCODE(SEXT8, SXTB); 277 STORE_OPCODE(ZEXT8, UXTB); 278 279 STORE_OPCODE(AND, ANDri); 280 STORE_OPCODE(RSB, RSBri); 281 282 STORE_OPCODE(STORE32, STRi12); 283 STORE_OPCODE(LOAD32, LDRi12); 284 285 // LDRH/STRH are special... 286 STORE16 = isThumb ? ARM::t2STRHi12 : ARM::STRH; 287 LOAD16 = isThumb ? ARM::t2LDRHi12 : ARM::LDRH; 288 289 STORE_OPCODE(STORE8, STRBi12); 290 STORE_OPCODE(LOAD8, LDRBi12); 291 292 STORE_OPCODE(CMPrr, CMPrr); 293 STORE_OPCODE(MOVi, MOVi); 294 STORE_OPCODE(MOVCCi, MOVCCi); 295 #undef MAP_OPCODE 296 } 297 298 unsigned ARMInstructionSelector::selectSimpleExtOpc(unsigned Opc, 299 unsigned Size) const { 300 using namespace TargetOpcode; 301 302 if (Size != 8 && Size != 16) 303 return Opc; 304 305 if (Opc == G_SEXT) 306 return Size == 8 ? Opcodes.SEXT8 : Opcodes.SEXT16; 307 308 if (Opc == G_ZEXT) 309 return Size == 8 ? 
Opcodes.ZEXT8 : Opcodes.ZEXT16; 310 311 return Opc; 312 } 313 314 unsigned ARMInstructionSelector::selectLoadStoreOpCode(unsigned Opc, 315 unsigned RegBank, 316 unsigned Size) const { 317 bool isStore = Opc == TargetOpcode::G_STORE; 318 319 if (RegBank == ARM::GPRRegBankID) { 320 switch (Size) { 321 case 1: 322 case 8: 323 return isStore ? Opcodes.STORE8 : Opcodes.LOAD8; 324 case 16: 325 return isStore ? Opcodes.STORE16 : Opcodes.LOAD16; 326 case 32: 327 return isStore ? Opcodes.STORE32 : Opcodes.LOAD32; 328 default: 329 return Opc; 330 } 331 } 332 333 if (RegBank == ARM::FPRRegBankID) { 334 switch (Size) { 335 case 32: 336 return isStore ? ARM::VSTRS : ARM::VLDRS; 337 case 64: 338 return isStore ? ARM::VSTRD : ARM::VLDRD; 339 default: 340 return Opc; 341 } 342 } 343 344 return Opc; 345 } 346 347 // When lowering comparisons, we sometimes need to perform two compares instead 348 // of just one. Get the condition codes for both comparisons. If only one is 349 // needed, the second member of the pair is ARMCC::AL. 
static std::pair<ARMCC::CondCodes, ARMCC::CondCodes>
getComparePreds(CmpInst::Predicate Pred) {
  std::pair<ARMCC::CondCodes, ARMCC::CondCodes> Preds = {ARMCC::AL, ARMCC::AL};
  switch (Pred) {
  // FCMP_ONE and FCMP_UEQ are the two predicates that need a second compare.
  case CmpInst::FCMP_ONE:
    Preds = {ARMCC::GT, ARMCC::MI};
    break;
  case CmpInst::FCMP_UEQ:
    Preds = {ARMCC::EQ, ARMCC::VS};
    break;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    Preds.first = ARMCC::EQ;
    break;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    Preds.first = ARMCC::GT;
    break;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    Preds.first = ARMCC::GE;
    break;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    Preds.first = ARMCC::HI;
    break;
  case CmpInst::FCMP_OLT:
    Preds.first = ARMCC::MI;
    break;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    Preds.first = ARMCC::LS;
    break;
  case CmpInst::FCMP_ORD:
    Preds.first = ARMCC::VC;
    break;
  case CmpInst::FCMP_UNO:
    Preds.first = ARMCC::VS;
    break;
  case CmpInst::FCMP_UGE:
    Preds.first = ARMCC::PL;
    break;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    Preds.first = ARMCC::LT;
    break;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    Preds.first = ARMCC::LE;
    break;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    Preds.first = ARMCC::NE;
    break;
  case CmpInst::ICMP_UGE:
    Preds.first = ARMCC::HS;
    break;
  case CmpInst::ICMP_ULT:
    Preds.first = ARMCC::LO;
    break;
  default:
    break;
  }
  // FCMP_TRUE/FCMP_FALSE never reach here (selectCmp handles them early),
  // so hitting the default case with no predicate set is a bug.
  assert(Preds.first != ARMCC::AL && "No comparisons needed?");
  return Preds;
}

// Bundle of target-specific opcodes and operand constraints needed to lower
// one G_ICMP / G_FCMP. Built by the callers in select() and passed by value.
struct ARMInstructionSelector::CmpConstants {
  CmpConstants(unsigned CmpOpcode, unsigned FlagsOpcode, unsigned SelectOpcode,
               unsigned OpRegBank, unsigned OpSize)
      : ComparisonOpcode(CmpOpcode), ReadFlagsOpcode(FlagsOpcode),
        SelectResultOpcode(SelectOpcode), OperandRegBankID(OpRegBank),
        OperandSize(OpSize) {}

  // The opcode used for performing the comparison.
  const unsigned ComparisonOpcode;

  // The opcode used for reading the flags set by the comparison. May be
  // ARM::INSTRUCTION_LIST_END if we don't need to read the flags.
  const unsigned ReadFlagsOpcode;

  // The opcode used for materializing the result of the comparison.
  const unsigned SelectResultOpcode;

  // The assumed register bank ID for the operands.
  const unsigned OperandRegBankID;

  // The assumed size in bits for the operands.
  const unsigned OperandSize;
};

// Captures an insertion point: the block containing \p MIB, the iterator just
// past it, and its debug location, so helper instructions land right after it.
struct ARMInstructionSelector::InsertInfo {
  InsertInfo(MachineInstrBuilder &MIB)
      : MBB(*MIB->getParent()), InsertBefore(std::next(MIB->getIterator())),
        DbgLoc(MIB->getDebugLoc()) {}

  MachineBasicBlock &MBB;
  const MachineBasicBlock::instr_iterator InsertBefore;
  const DebugLoc &DbgLoc;
};

// Emit a MOVi materializing \p Constant into \p DestReg at insertion point
// \p I, predicated AL with no condition-code def.
void ARMInstructionSelector::putConstant(InsertInfo I, unsigned DestReg,
                                         unsigned Constant) const {
  (void)BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Opcodes.MOVi))
      .addDef(DestReg)
      .addImm(Constant)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());
}

bool ARMInstructionSelector::validOpRegPair(MachineRegisterInfo &MRI,
                                            unsigned LHSReg, unsigned RHSReg,
                                            unsigned ExpectedSize,
                                            unsigned ExpectedRegBankID) const {
  return MRI.getType(LHSReg) == MRI.getType(RHSReg) &&
         validReg(MRI, LHSReg, ExpectedSize, ExpectedRegBankID) &&
         validReg(MRI, RHSReg, ExpectedSize, ExpectedRegBankID);
}

bool ARMInstructionSelector::validReg(MachineRegisterInfo &MRI, unsigned Reg,
                                      unsigned ExpectedSize,
                                      unsigned ExpectedRegBankID) const {
  if (MRI.getType(Reg).getSizeInBits() != ExpectedSize) {
    LLVM_DEBUG(dbgs() << "Unexpected size for register");
    return false;
  }

  if (RBI.getRegBank(Reg, MRI, TRI)->getID() != ExpectedRegBankID) {
    LLVM_DEBUG(dbgs() << "Unexpected register bank for register");
    return false;
  }

  return true;
}

// Lower a G_ICMP / G_FCMP (held in \p MIB) using the opcodes in \p Helper.
// Emits: a constant 0, one or two compare+conditional-move sequences, then
// erases the generic instruction. Returns false if operands don't validate.
bool ARMInstructionSelector::selectCmp(CmpConstants Helper,
                                       MachineInstrBuilder &MIB,
                                       MachineRegisterInfo &MRI) const {
  const InsertInfo I(MIB);

  // Operand 0 is the i1 result; it must live in a GPR.
  auto ResReg = MIB->getOperand(0).getReg();
  if (!validReg(MRI, ResReg, 1, ARM::GPRRegBankID))
    return false;

  // Operand 1 is the predicate; TRUE/FALSE need no actual compare.
  auto Cond =
      static_cast<CmpInst::Predicate>(MIB->getOperand(1).getPredicate());
  if (Cond == CmpInst::FCMP_TRUE || Cond == CmpInst::FCMP_FALSE) {
    putConstant(I, ResReg, Cond == CmpInst::FCMP_TRUE ? 1 : 0);
    MIB->eraseFromParent();
    return true;
  }

  auto LHSReg = MIB->getOperand(2).getReg();
  auto RHSReg = MIB->getOperand(3).getReg();
  if (!validOpRegPair(MRI, LHSReg, RHSReg, Helper.OperandSize,
                      Helper.OperandRegBankID))
    return false;

  auto ARMConds = getComparePreds(Cond);
  // Seed the result chain with 0; each insertComparison ORs in its outcome.
  auto ZeroReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  putConstant(I, ZeroReg, 0);

  if (ARMConds.second == ARMCC::AL) {
    // Simple case, we only need one comparison and we're done.
    if (!insertComparison(Helper, I, ResReg, ARMConds.first, LHSReg, RHSReg,
                          ZeroReg))
      return false;
  } else {
    // Not so simple, we need two successive comparisons.
    auto IntermediateRes = MRI.createVirtualRegister(&ARM::GPRRegClass);
    if (!insertComparison(Helper, I, IntermediateRes, ARMConds.first, LHSReg,
                          RHSReg, ZeroReg))
      return false;
    if (!insertComparison(Helper, I, ResReg, ARMConds.second, LHSReg, RHSReg,
                          IntermediateRes))
      return false;
  }

  MIB->eraseFromParent();
  return true;
}

// Emit compare + optional flags-read + conditional move computing
// ResReg = (LHS Cond RHS) ? 1 : PrevRes. See the declaration for the contract.
bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I,
                                              unsigned ResReg,
                                              ARMCC::CondCodes Cond,
                                              unsigned LHSReg, unsigned RHSReg,
                                              unsigned PrevRes) const {
  // Perform the comparison.
  auto CmpI =
      BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Helper.ComparisonOpcode))
          .addUse(LHSReg)
          .addUse(RHSReg)
          .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Read the comparison flags (if necessary).
  if (Helper.ReadFlagsOpcode != ARM::INSTRUCTION_LIST_END) {
    auto ReadI = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                         TII.get(Helper.ReadFlagsOpcode))
                     .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*ReadI, TII, TRI, RBI))
      return false;
  }

  // Select either 1 or the previous result based on the value of the flags.
  auto Mov1I = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                       TII.get(Helper.SelectResultOpcode))
                   .addDef(ResReg)
                   .addUse(PrevRes)
                   .addImm(1)
                   .add(predOps(Cond, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  return true;
}

// Lower G_GLOBAL_VALUE. Chooses between MOVT-based, literal-pool-based and
// constant-pool-based materialization depending on relocation model (PIC,
// ROPI/RWPI, static), object format (ELF/MachO) and subtarget features.
bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  if ((STI.isROPI() || STI.isRWPI()) && !STI.isTargetELF()) {
    LLVM_DEBUG(dbgs() << "ROPI and RWPI only supported for ELF\n");
    return false;
  }

  auto GV = MIB->getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    LLVM_DEBUG(dbgs() << "TLS variables not supported yet\n");
    return false;
  }

  auto &MBB = *MIB->getParent();
  auto &MF = *MBB.getParent();

  bool UseMovt = STI.useMovt();

  unsigned Size = TM.getPointerSize(0);
  unsigned Alignment = 4;

  // Append the operands for a load of GV's address from the constant pool
  // to \p MIB (which must already be an LDRi12).
  auto addOpsForConstantPoolLoad = [&MF, Alignment,
                                    Size](MachineInstrBuilder &MIB,
                                          const GlobalValue *GV, bool IsSBREL) {
    assert(MIB->getOpcode() == ARM::LDRi12 && "Unsupported instruction");
    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        // For SB relative entries we need a target-specific constant pool.
        // Otherwise, just use a regular constant pool entry.
        IsSBREL
            ? ConstPool->getConstantPoolIndex(
                  ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment)
            : ConstPool->getConstantPoolIndex(GV, Alignment);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(
            MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
                                    MachineMemOperand::MOLoad, Size, Alignment))
        .addImm(0)
        .add(predOps(ARMCC::AL));
  };

  if (TM.isPositionIndependent()) {
    bool Indirect = STI.isGVIndirectSymbol(GV);
    // FIXME: Taking advantage of MOVT for ELF is pretty involved, so we don't
    // support it yet. See PR28229.
    unsigned Opc =
        UseMovt && !STI.isTargetELF()
            ? (Indirect ? ARM::MOV_ga_pcrel_ldr : ARM::MOV_ga_pcrel)
            : (Indirect ? ARM::LDRLIT_ga_pcrel_ldr : ARM::LDRLIT_ga_pcrel);
    MIB->setDesc(TII.get(Opc));

    int TargetFlags = ARMII::MO_NO_FLAG;
    if (STI.isTargetDarwin())
      TargetFlags |= ARMII::MO_NONLAZY;
    if (STI.isGVInGOT(GV))
      TargetFlags |= ARMII::MO_GOT;
    MIB->getOperand(1).setTargetFlags(TargetFlags);

    // The *_ldr variants also load through the GOT entry, so they need a
    // memory operand for it.
    if (Indirect)
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad,
          TM.getProgramPointerSize(), Alignment));

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  bool isReadOnly = STI.getTargetLowering()->isReadOnly(GV);
  if (STI.isROPI() && isReadOnly) {
    unsigned Opc = UseMovt ? ARM::MOV_ga_pcrel : ARM::LDRLIT_ga_pcrel;
    MIB->setDesc(TII.get(Opc));
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }
  if (STI.isRWPI() && !isReadOnly) {
    // RWPI writable data: materialize the SB-relative offset, then add it to
    // the static base register.
    auto Offset = MRI.createVirtualRegister(&ARM::GPRRegClass);
    MachineInstrBuilder OffsetMIB;
    if (UseMovt) {
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(ARM::MOVi32imm), Offset);
      OffsetMIB.addGlobalAddress(GV, /*Offset*/ 0, ARMII::MO_SBREL);
    } else {
      // Load the offset from the constant pool.
      OffsetMIB =
          BuildMI(MBB, *MIB, MIB->getDebugLoc(), TII.get(ARM::LDRi12), Offset);
      addOpsForConstantPoolLoad(OffsetMIB, GV, /*IsSBREL*/ true);
    }
    if (!constrainSelectedInstRegOperands(*OffsetMIB, TII, TRI, RBI))
      return false;

    // Add the offset to the SB register.
    MIB->setDesc(TII.get(ARM::ADDrr));
    MIB->RemoveOperand(1);
    MIB.addReg(ARM::R9) // FIXME: don't hardcode R9
        .addReg(Offset)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  if (STI.isTargetELF()) {
    if (UseMovt) {
      MIB->setDesc(TII.get(ARM::MOVi32imm));
    } else {
      // Load the global's address from the constant pool.
      MIB->setDesc(TII.get(ARM::LDRi12));
      MIB->RemoveOperand(1);
      addOpsForConstantPoolLoad(MIB, GV, /*IsSBREL*/ false);
    }
  } else if (STI.isTargetMachO()) {
    if (UseMovt)
      MIB->setDesc(TII.get(ARM::MOVi32imm));
    else
      MIB->setDesc(TII.get(ARM::LDRLIT_ga_abs));
  } else {
    LLVM_DEBUG(dbgs() << "Object format not supported yet\n");
    return false;
  }

  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

// Lower G_SELECT as CMPri (condition vs 0) + MOVCCr. Only 32-bit GPR
// operands with an i1 GPR condition are supported (asserted below).
bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  auto &MBB = *MIB->getParent();
  auto InsertBefore = std::next(MIB->getIterator());
  auto &DbgLoc = MIB->getDebugLoc();

  // Compare the condition to 0.
  auto CondReg = MIB->getOperand(1).getReg();
  assert(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(ARM::CMPri))
                  .addUse(CondReg)
                  .addImm(0)
                  .add(predOps(ARMCC::AL))
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Move a value into the result register based on the result of the
  // comparison.
  auto ResReg = MIB->getOperand(0).getReg();
  auto TrueReg = MIB->getOperand(2).getReg();
  auto FalseReg = MIB->getOperand(3).getReg();
  assert(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) &&
         validOpRegPair(MRI, TrueReg, FalseReg, 32, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(ARM::MOVCCr))
                   .addDef(ResReg)
                   .addUse(TrueReg)
                   .addUse(FalseReg)
                   .add(predOps(ARMCC::EQ, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  MIB->eraseFromParent();
  return true;
}

// Lower a generic shift to MOVsr with the given ARM_AM shift opcode.
bool ARMInstructionSelector::selectShift(unsigned ShiftOpc,
                                         MachineInstrBuilder &MIB) const {
  MIB->setDesc(TII.get(ARM::MOVsr));
  MIB.addImm(ShiftOpc);
  MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

// Main entry point: try the TableGen-generated selectImpl() first, then fall
// back to the manual lowering per opcode below. Returns false if the
// instruction could not be selected.
bool ARMInstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  auto &MBB = *I.getParent();
  auto &MF = *MBB.getParent();
  auto &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    // Already-selected instructions need no work; copies just need their
    // destination constrained to a register class.
    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  using namespace TargetOpcode;

  if (selectImpl(I, CoverageInfo))
    return true;

  MachineInstrBuilder MIB{MF, I};
  bool isSExt = false;

  switch (I.getOpcode()) {
  case G_SEXT:
    isSExt = true;
    LLVM_FALLTHROUGH;
  case G_ZEXT: {
    LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    // FIXME: Smaller destination sizes coming soon!
    if (DstTy.getSizeInBits() != 32) {
      LLVM_DEBUG(dbgs() << "Unsupported destination size for extension");
      return false;
    }

    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    unsigned SrcSize = SrcTy.getSizeInBits();
    switch (SrcSize) {
    case 1: {
      // ZExt boils down to & 0x1; for SExt we also subtract that from 0
      I.setDesc(TII.get(Opcodes.AND));
      MIB.addImm(1).add(predOps(ARMCC::AL)).add(condCodeOp());

      if (isSExt) {
        unsigned SExtResult = I.getOperand(0).getReg();

        // Use a new virtual register for the result of the AND
        unsigned AndResult = MRI.createVirtualRegister(&ARM::GPRRegClass);
        I.getOperand(0).setReg(AndResult);

        // RSB 0 - (x & 1) turns 1 into -1 (all ones), i.e. sign extension.
        auto InsertBefore = std::next(I.getIterator());
        auto SubI =
            BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(Opcodes.RSB))
                .addDef(SExtResult)
                .addUse(AndResult)
                .addImm(0)
                .add(predOps(ARMCC::AL))
                .add(condCodeOp());
        if (!constrainSelectedInstRegOperands(*SubI, TII, TRI, RBI))
          return false;
      }
      break;
    }
    case 8:
    case 16: {
      unsigned NewOpc = selectSimpleExtOpc(I.getOpcode(), SrcSize);
      if (NewOpc == I.getOpcode())
        return false;
      I.setDesc(TII.get(NewOpc));
      // SXT/UXT take a rotation immediate; 0 means no rotation.
      MIB.addImm(0).add(predOps(ARMCC::AL));
      break;
    }
    default:
      LLVM_DEBUG(dbgs() << "Unsupported source size for extension");
      return false;
    }
    break;
  }
  case G_ANYEXT:
  case G_TRUNC: {
    // The high bits are undefined, so there's nothing special to do, just
    // treat it as a copy.
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() == ARM::FPRRegBankID) {
      // This should only happen in the obscure case where we have put a 64-bit
      // integer into a D register. Get it out of there and keep only the
      // interesting part.
      assert(I.getOpcode() == G_TRUNC && "Unsupported operand for G_ANYEXT");
      assert(DstRegBank.getID() == ARM::GPRRegBankID &&
             "Unsupported combination of register banks");
      assert(MRI.getType(SrcReg).getSizeInBits() == 64 && "Unsupported size");
      assert(MRI.getType(DstReg).getSizeInBits() <= 32 && "Unsupported size");

      unsigned IgnoredBits = MRI.createVirtualRegister(&ARM::GPRRegClass);
      auto InsertBefore = std::next(I.getIterator());
      auto MovI =
          BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(ARM::VMOVRRD))
              .addDef(DstReg)
              .addDef(IgnoredBits)
              .addUse(SrcReg)
              .add(predOps(ARMCC::AL));
      if (!constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI))
        return false;

      MIB->eraseFromParent();
      return true;
    }

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_ANYEXT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_TRUNC/G_ANYEXT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_CONSTANT: {
    if (!MRI.getType(I.getOperand(0).getReg()).isPointer()) {
      // Non-pointer constants should be handled by TableGen.
      LLVM_DEBUG(dbgs() << "Unsupported constant type\n");
      return false;
    }

    // Only the null pointer is supported; normalize its operand to a plain
    // immediate 0 so it fits MOVi.
    auto &Val = I.getOperand(1);
    if (Val.isCImm()) {
      if (!Val.getCImm()->isZero()) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
      Val.ChangeToImmediate(0);
    } else {
      assert(Val.isImm() && "Unexpected operand for G_CONSTANT");
      if (Val.getImm() != 0) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
    }

    I.setDesc(TII.get(ARM::MOVi));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    // Pointers and 32-bit integers share registers, so this is just a copy.
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs()
          << "G_INTTOPTR/G_PTRTOINT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(
          dbgs() << "G_INTTOPTR/G_PTRTOINT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_SELECT:
    return selectSelect(MIB, MRI);
  case G_ICMP: {
    // Integer compares never need a flags-read instruction, hence
    // INSTRUCTION_LIST_END as the ReadFlagsOpcode.
    CmpConstants Helper(Opcodes.CMPrr, ARM::INSTRUCTION_LIST_END,
                        Opcodes.MOVCCi, ARM::GPRRegBankID, 32);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_FCMP: {
    assert(STI.hasVFP2() && "Can't select fcmp without VFP");

    unsigned OpReg = I.getOperand(2).getReg();
    unsigned Size = MRI.getType(OpReg).getSizeInBits();

    if (Size == 64 && STI.isFPOnlySP()) {
      LLVM_DEBUG(dbgs() << "Subtarget only supports single precision");
      return false;
    }
    if (Size != 32 && Size != 64) {
      LLVM_DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
      return false;
    }

    // VFP compares set the FPSCR flags, which FMSTAT copies into CPSR.
    CmpConstants Helper(Size == 32 ? ARM::VCMPS : ARM::VCMPD, ARM::FMSTAT,
                        Opcodes.MOVCCi, ARM::FPRRegBankID, Size);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_LSHR:
    return selectShift(ARM_AM::ShiftOpc::lsr, MIB);
  case G_ASHR:
    return selectShift(ARM_AM::ShiftOpc::asr, MIB);
  case G_SHL: {
    return selectShift(ARM_AM::ShiftOpc::lsl, MIB);
  }
  case G_GEP:
    I.setDesc(TII.get(STI.isThumb2() ? ARM::t2ADDrr : ARM::ADDrr));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_FRAME_INDEX:
    // Add 0 to the given frame index and hope it will eventually be folded into
    // the user(s).
    I.setDesc(TII.get(ARM::ADDri));
    MIB.addImm(0).add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_GLOBAL_VALUE:
    return selectGlobal(MIB, MRI);
  case G_STORE:
  case G_LOAD: {
    const auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    // Operand 0 is the value stored / loaded; its bank and size pick the
    // target opcode.
    unsigned Reg = I.getOperand(0).getReg();
    unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();

    LLT ValTy = MRI.getType(Reg);
    const auto ValSize = ValTy.getSizeInBits();

    assert((ValSize != 64 || STI.hasVFP2()) &&
           "Don't know how to load/store 64-bit value without VFP");

    const auto NewOpc = selectLoadStoreOpCode(I.getOpcode(), RegBank, ValSize);
    if (NewOpc == G_LOAD || NewOpc == G_STORE)
      return false;

    I.setDesc(TII.get(NewOpc));

    if (NewOpc == ARM::LDRH || NewOpc == ARM::STRH)
      // LDRH has a funny addressing mode (there's already a FIXME for it).
      MIB.addReg(0);
    MIB.addImm(0).add(predOps(ARMCC::AL));
    break;
  }
  case G_MERGE_VALUES: {
    if (!selectMergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_UNMERGE_VALUES: {
    if (!selectUnmergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_BRCOND: {
    if (!validReg(MRI, I.getOperand(0).getReg(), 1, ARM::GPRRegBankID)) {
      LLVM_DEBUG(dbgs() << "Unsupported condition register for G_BRCOND");
      return false;
    }

    // Set the flags.
    auto Test = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ARM::TSTri))
                    .addReg(I.getOperand(0).getReg())
                    .addImm(1)
                    .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*Test, TII, TRI, RBI))
      return false;

    // Branch conditionally.
    auto Branch = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ARM::Bcc))
                      .add(I.getOperand(1))
                      .add(predOps(ARMCC::NE, ARM::CPSR));
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;
    I.eraseFromParent();
    return true;
  }
  case G_PHI: {
    I.setDesc(TII.get(PHI));

    unsigned DstReg = I.getOperand(0).getReg();
    const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);
    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      // On failure, fall through to the common constraining code below.
      break;
    }

    return true;
  }
  default:
    return false;
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}