//===- ARMInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "arm-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class ARMInstructionSelector : public InstructionSelector {
public:
  ARMInstructionSelector(const ARMBaseTargetMachine &TM, const ARMSubtarget &STI,
                         const ARMRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  struct CmpConstants;
  struct InsertInfo;

  bool selectCmp(CmpConstants Helper, MachineInstrBuilder &MIB,
                 MachineRegisterInfo &MRI) const;

  // Helper for inserting a comparison sequence that sets \p ResReg to either 1
  // if \p LHSReg and \p RHSReg are in the relationship defined by \p Cond, or
  // \p PrevRes otherwise. In essence, it computes PrevRes OR (LHS Cond RHS).
  bool insertComparison(CmpConstants Helper, InsertInfo I, unsigned ResReg,
                        ARMCC::CondCodes Cond, unsigned LHSReg, unsigned RHSReg,
                        unsigned PrevRes) const;

  // Set \p DestReg to \p Constant.
  void putConstant(InsertInfo I, unsigned DestReg, unsigned Constant) const;

  bool selectGlobal(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectSelect(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectShift(unsigned ShiftOpc, MachineInstrBuilder &MIB) const;

  // Check if the types match and both operands have the expected size and
  // register bank.
  bool validOpRegPair(MachineRegisterInfo &MRI, unsigned LHS, unsigned RHS,
                      unsigned ExpectedSize, unsigned ExpectedRegBankID) const;

  // Check if the register has the expected size and register bank.
  bool validReg(MachineRegisterInfo &MRI, unsigned Reg, unsigned ExpectedSize,
                unsigned ExpectedRegBankID) const;

  const ARMBaseInstrInfo &TII;
  const ARMBaseRegisterInfo &TRI;
  const ARMBaseTargetMachine &TM;
  const ARMRegisterBankInfo &RBI;
  const ARMSubtarget &STI;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. If we want to reuse some of
  // the custom C++ predicates written for DAGISel, we need to have both around.
  const ARMSubtarget *Subtarget = &STI;

  // Store the opcodes that we might need, so we don't have to check what kind
  // of subtarget (ARM vs Thumb) we have all the time.
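  // For example, on an ARM-mode subtarget STORE32 caches ARM::STRi12, while on
  // a Thumb2 subtarget it caches ARM::t2STRi12 (see the constructor below).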
  struct OpcodeCache {
    unsigned ZEXT16;
    unsigned SEXT16;

    unsigned ZEXT8;
    unsigned SEXT8;

    // Used for implementing ZEXT/SEXT from i1
    unsigned AND;
    unsigned RSB;

    unsigned STORE32;
    unsigned LOAD32;

    unsigned STORE16;
    unsigned LOAD16;

    unsigned STORE8;
    unsigned LOAD8;

    unsigned ADDrr;
    unsigned ADDri;

    // Used for G_ICMP
    unsigned CMPrr;
    unsigned MOVi;
    unsigned MOVCCi;

    // Used for G_SELECT
    unsigned MOVCCr;

    unsigned TSTri;
    unsigned Bcc;

    // Used for G_GLOBAL_VALUE
    unsigned MOVi32imm;
    unsigned ConstPoolLoad;
    unsigned MOV_ga_pcrel;
    unsigned LDRLIT_ga_pcrel;
    unsigned LDRLIT_ga_abs;

    OpcodeCache(const ARMSubtarget &STI);
  } const Opcodes;

  // Select the opcode for simple extensions (that translate to a single SXT/UXT
  // instruction). Extension operations more complicated than that should not
  // invoke this. Returns the original opcode if it doesn't know how to select a
  // better one.
  unsigned selectSimpleExtOpc(unsigned Opc, unsigned Size) const;

  // Select the opcode for simple loads and stores. Returns the original opcode
  // if it doesn't know how to select a better one.
  unsigned selectLoadStoreOpCode(unsigned Opc, unsigned RegBank,
                                 unsigned Size) const;

  void renderVFPF32Imm(MachineInstrBuilder &New, const MachineInstr &Old) const;
  void renderVFPF64Imm(MachineInstrBuilder &New, const MachineInstr &Old) const;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

  // We declare the temporaries used by selectImpl() in the class to minimize
  // the cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};
} // end anonymous namespace

namespace llvm {
InstructionSelector *
createARMInstructionSelector(const ARMBaseTargetMachine &TM,
                             const ARMSubtarget &STI,
                             const ARMRegisterBankInfo &RBI) {
  return new ARMInstructionSelector(TM, STI, RBI);
}
} // end namespace llvm

const unsigned zero_reg = 0;

#define GET_GLOBALISEL_IMPL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

ARMInstructionSelector::ARMInstructionSelector(const ARMBaseTargetMachine &TM,
                                               const ARMSubtarget &STI,
                                               const ARMRegisterBankInfo &RBI)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), TM(TM), RBI(RBI), STI(STI), Opcodes(STI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

static const TargetRegisterClass *guessRegClass(unsigned Reg,
                                                MachineRegisterInfo &MRI,
                                                const TargetRegisterInfo &TRI,
                                                const RegisterBankInfo &RBI) {
  const RegisterBank *RegBank = RBI.getRegBank(Reg, MRI, TRI);
  assert(RegBank && "Can't get reg bank for virtual register");

  const unsigned Size = MRI.getType(Reg).getSizeInBits();
  assert((RegBank->getID() == ARM::GPRRegBankID ||
          RegBank->getID() == ARM::FPRRegBankID) &&
         "Unsupported reg bank");

  if (RegBank->getID() == ARM::FPRRegBankID) {
    if (Size == 32)
      return &ARM::SPRRegClass;
    else if (Size == 64)
      return &ARM::DPRRegClass;
    else if (Size == 128)
      return &ARM::QPRRegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }

  return &ARM::GPRRegClass;
}

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {
  Register DstReg = I.getOperand(0).getReg();
  if (Register::isPhysicalRegister(DstReg))
    return true;

  const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

static bool selectMergeValues(MachineInstrBuilder &MIB,
                              const ARMBaseInstrInfo &TII,
                              MachineRegisterInfo &MRI,
                              const TargetRegisterInfo &TRI,
                              const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() && "Can't select merge without VFP");

  // We only support G_MERGE_VALUES as a way to stick together two scalar GPRs
  // into one DPR.
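  // Roughly (with illustrative virtual register names):
  //   %dst:fpr(s64) = G_MERGE_VALUES %lo:gpr(s32), %hi:gpr(s32)
  // becomes
  //   %dst = VMOVDRR %lo, %hi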
  Register VReg0 = MIB->getOperand(0).getReg();
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  Register VReg1 = MIB->getOperand(1).getReg();
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  Register VReg2 = MIB->getOperand(2).getReg();
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVDRR));
  MIB.add(predOps(ARMCC::AL));

  return true;
}

static bool selectUnmergeValues(MachineInstrBuilder &MIB,
                                const ARMBaseInstrInfo &TII,
                                MachineRegisterInfo &MRI,
                                const TargetRegisterInfo &TRI,
                                const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() &&
         "Can't select unmerge without VFP");

  // We only support G_UNMERGE_VALUES as a way to break up one DPR into two
  // GPRs.
  Register VReg0 = MIB->getOperand(0).getReg();
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  Register VReg1 = MIB->getOperand(1).getReg();
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  Register VReg2 = MIB->getOperand(2).getReg();
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVRRD));
  MIB.add(predOps(ARMCC::AL));

  return true;
}

ARMInstructionSelector::OpcodeCache::OpcodeCache(const ARMSubtarget &STI) {
  bool isThumb = STI.isThumb();

  using namespace TargetOpcode;

#define STORE_OPCODE(VAR, OPC) VAR = isThumb ? ARM::t2##OPC : ARM::OPC
  STORE_OPCODE(SEXT16, SXTH);
  STORE_OPCODE(ZEXT16, UXTH);

  STORE_OPCODE(SEXT8, SXTB);
  STORE_OPCODE(ZEXT8, UXTB);

  STORE_OPCODE(AND, ANDri);
  STORE_OPCODE(RSB, RSBri);

  STORE_OPCODE(STORE32, STRi12);
  STORE_OPCODE(LOAD32, LDRi12);

  // LDRH/STRH are special...
  STORE16 = isThumb ? ARM::t2STRHi12 : ARM::STRH;
  LOAD16 = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;

  STORE_OPCODE(STORE8, STRBi12);
  STORE_OPCODE(LOAD8, LDRBi12);

  STORE_OPCODE(ADDrr, ADDrr);
  STORE_OPCODE(ADDri, ADDri);

  STORE_OPCODE(CMPrr, CMPrr);
  STORE_OPCODE(MOVi, MOVi);
  STORE_OPCODE(MOVCCi, MOVCCi);

  STORE_OPCODE(MOVCCr, MOVCCr);

  STORE_OPCODE(TSTri, TSTri);
  STORE_OPCODE(Bcc, Bcc);

  STORE_OPCODE(MOVi32imm, MOVi32imm);
  ConstPoolLoad = isThumb ? ARM::t2LDRpci : ARM::LDRi12;
  STORE_OPCODE(MOV_ga_pcrel, MOV_ga_pcrel);
  LDRLIT_ga_pcrel = isThumb ? ARM::tLDRLIT_ga_pcrel : ARM::LDRLIT_ga_pcrel;
  LDRLIT_ga_abs = isThumb ? ARM::tLDRLIT_ga_abs : ARM::LDRLIT_ga_abs;
#undef STORE_OPCODE
}

unsigned ARMInstructionSelector::selectSimpleExtOpc(unsigned Opc,
                                                    unsigned Size) const {
  using namespace TargetOpcode;

  if (Size != 8 && Size != 16)
    return Opc;

  if (Opc == G_SEXT)
    return Size == 8 ? Opcodes.SEXT8 : Opcodes.SEXT16;

  if (Opc == G_ZEXT)
    return Size == 8 ? Opcodes.ZEXT8 : Opcodes.ZEXT16;

  return Opc;
}

unsigned ARMInstructionSelector::selectLoadStoreOpCode(unsigned Opc,
                                                       unsigned RegBank,
                                                       unsigned Size) const {
  bool isStore = Opc == TargetOpcode::G_STORE;

  if (RegBank == ARM::GPRRegBankID) {
    switch (Size) {
    case 1:
    case 8:
      return isStore ? Opcodes.STORE8 : Opcodes.LOAD8;
    case 16:
      return isStore ? Opcodes.STORE16 : Opcodes.LOAD16;
    case 32:
      return isStore ? Opcodes.STORE32 : Opcodes.LOAD32;
    default:
      return Opc;
    }
  }

  if (RegBank == ARM::FPRRegBankID) {
    switch (Size) {
    case 32:
      return isStore ? ARM::VSTRS : ARM::VLDRS;
    case 64:
      return isStore ? ARM::VSTRD : ARM::VLDRD;
    default:
      return Opc;
    }
  }

  return Opc;
}

// When lowering comparisons, we sometimes need to perform two compares instead
// of just one. Get the condition codes for both comparisons. If only one is
// needed, the second member of the pair is ARMCC::AL.
static std::pair<ARMCC::CondCodes, ARMCC::CondCodes>
getComparePreds(CmpInst::Predicate Pred) {
  std::pair<ARMCC::CondCodes, ARMCC::CondCodes> Preds = {ARMCC::AL, ARMCC::AL};
  switch (Pred) {
  case CmpInst::FCMP_ONE:
    Preds = {ARMCC::GT, ARMCC::MI};
    break;
  case CmpInst::FCMP_UEQ:
    Preds = {ARMCC::EQ, ARMCC::VS};
    break;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    Preds.first = ARMCC::EQ;
    break;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    Preds.first = ARMCC::GT;
    break;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    Preds.first = ARMCC::GE;
    break;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    Preds.first = ARMCC::HI;
    break;
  case CmpInst::FCMP_OLT:
    Preds.first = ARMCC::MI;
    break;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    Preds.first = ARMCC::LS;
    break;
  case CmpInst::FCMP_ORD:
    Preds.first = ARMCC::VC;
    break;
  case CmpInst::FCMP_UNO:
    Preds.first = ARMCC::VS;
    break;
  case CmpInst::FCMP_UGE:
    Preds.first = ARMCC::PL;
    break;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    Preds.first = ARMCC::LT;
    break;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    Preds.first = ARMCC::LE;
    break;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    Preds.first = ARMCC::NE;
    break;
  case CmpInst::ICMP_UGE:
    Preds.first = ARMCC::HS;
    break;
  case CmpInst::ICMP_ULT:
    Preds.first = ARMCC::LO;
    break;
  default:
    break;
  }
  assert(Preds.first != ARMCC::AL && "No comparisons needed?");
  return Preds;
}

struct ARMInstructionSelector::CmpConstants {
  CmpConstants(unsigned CmpOpcode, unsigned FlagsOpcode, unsigned SelectOpcode,
               unsigned OpRegBank, unsigned OpSize)
      : ComparisonOpcode(CmpOpcode), ReadFlagsOpcode(FlagsOpcode),
        SelectResultOpcode(SelectOpcode), OperandRegBankID(OpRegBank),
        OperandSize(OpSize) {}

  // The opcode used for performing the comparison.
  const unsigned ComparisonOpcode;

  // The opcode used for reading the flags set by the comparison. May be
  // ARM::INSTRUCTION_LIST_END if we don't need to read the flags.
  const unsigned ReadFlagsOpcode;

  // The opcode used for materializing the result of the comparison.
  const unsigned SelectResultOpcode;

  // The assumed register bank ID for the operands.
  const unsigned OperandRegBankID;

  // The assumed size in bits for the operands.
  const unsigned OperandSize;
};

struct ARMInstructionSelector::InsertInfo {
  InsertInfo(MachineInstrBuilder &MIB)
      : MBB(*MIB->getParent()), InsertBefore(std::next(MIB->getIterator())),
        DbgLoc(MIB->getDebugLoc()) {}

  MachineBasicBlock &MBB;
  const MachineBasicBlock::instr_iterator InsertBefore;
  const DebugLoc &DbgLoc;
};

void ARMInstructionSelector::putConstant(InsertInfo I, unsigned DestReg,
                                         unsigned Constant) const {
  (void)BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Opcodes.MOVi))
      .addDef(DestReg)
      .addImm(Constant)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());
}

bool ARMInstructionSelector::validOpRegPair(MachineRegisterInfo &MRI,
                                            unsigned LHSReg, unsigned RHSReg,
                                            unsigned ExpectedSize,
                                            unsigned ExpectedRegBankID) const {
  return MRI.getType(LHSReg) == MRI.getType(RHSReg) &&
         validReg(MRI, LHSReg, ExpectedSize, ExpectedRegBankID) &&
         validReg(MRI, RHSReg, ExpectedSize, ExpectedRegBankID);
}

bool ARMInstructionSelector::validReg(MachineRegisterInfo &MRI, unsigned Reg,
                                      unsigned ExpectedSize,
                                      unsigned ExpectedRegBankID) const {
  if (MRI.getType(Reg).getSizeInBits() != ExpectedSize) {
    LLVM_DEBUG(dbgs() << "Unexpected size for register");
    return false;
  }

  if (RBI.getRegBank(Reg, MRI, TRI)->getID() != ExpectedRegBankID) {
    LLVM_DEBUG(dbgs() << "Unexpected register bank for register");
    return false;
  }

  return true;
}

bool ARMInstructionSelector::selectCmp(CmpConstants Helper,
                                       MachineInstrBuilder &MIB,
                                       MachineRegisterInfo &MRI) const {
  const InsertInfo I(MIB);

  auto ResReg = MIB->getOperand(0).getReg();
  if (!validReg(MRI, ResReg, 1, ARM::GPRRegBankID))
    return false;

  auto Cond =
      static_cast<CmpInst::Predicate>(MIB->getOperand(1).getPredicate());
  if (Cond == CmpInst::FCMP_TRUE || Cond == CmpInst::FCMP_FALSE) {
    putConstant(I, ResReg, Cond == CmpInst::FCMP_TRUE ? 1 : 0);
    MIB->eraseFromParent();
    return true;
  }

  auto LHSReg = MIB->getOperand(2).getReg();
  auto RHSReg = MIB->getOperand(3).getReg();
  if (!validOpRegPair(MRI, LHSReg, RHSReg, Helper.OperandSize,
                      Helper.OperandRegBankID))
    return false;

  auto ARMConds = getComparePreds(Cond);
  auto ZeroReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  putConstant(I, ZeroReg, 0);

  if (ARMConds.second == ARMCC::AL) {
    // Simple case, we only need one comparison and we're done.
    if (!insertComparison(Helper, I, ResReg, ARMConds.first, LHSReg, RHSReg,
                          ZeroReg))
      return false;
  } else {
    // Not so simple, we need two successive comparisons.
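    // E.g. FCMP_ONE maps to {GT, MI}: the first comparison ORs "LHS > RHS"
    // into an intermediate result, and the second ORs in "LHS < RHS", which
    // together implement "ordered and not equal".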
    auto IntermediateRes = MRI.createVirtualRegister(&ARM::GPRRegClass);
    if (!insertComparison(Helper, I, IntermediateRes, ARMConds.first, LHSReg,
                          RHSReg, ZeroReg))
      return false;
    if (!insertComparison(Helper, I, ResReg, ARMConds.second, LHSReg, RHSReg,
                          IntermediateRes))
      return false;
  }

  MIB->eraseFromParent();
  return true;
}

bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I,
                                              unsigned ResReg,
                                              ARMCC::CondCodes Cond,
                                              unsigned LHSReg, unsigned RHSReg,
                                              unsigned PrevRes) const {
  // Perform the comparison.
  auto CmpI =
      BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Helper.ComparisonOpcode))
          .addUse(LHSReg)
          .addUse(RHSReg)
          .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Read the comparison flags (if necessary).
  if (Helper.ReadFlagsOpcode != ARM::INSTRUCTION_LIST_END) {
    auto ReadI = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                         TII.get(Helper.ReadFlagsOpcode))
                     .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*ReadI, TII, TRI, RBI))
      return false;
  }

  // Select either 1 or the previous result based on the value of the flags.
  auto Mov1I = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                       TII.get(Helper.SelectResultOpcode))
                   .addDef(ResReg)
                   .addUse(PrevRes)
                   .addImm(1)
                   .add(predOps(Cond, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  return true;
}

bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  if ((STI.isROPI() || STI.isRWPI()) && !STI.isTargetELF()) {
    LLVM_DEBUG(dbgs() << "ROPI and RWPI only supported for ELF\n");
    return false;
  }

  auto GV = MIB->getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    LLVM_DEBUG(dbgs() << "TLS variables not supported yet\n");
    return false;
  }

  auto &MBB = *MIB->getParent();
  auto &MF = *MBB.getParent();

  bool UseMovt = STI.useMovt();

  unsigned Size = TM.getPointerSize(0);
  unsigned Alignment = 4;

  auto addOpsForConstantPoolLoad = [&MF, Alignment,
                                    Size](MachineInstrBuilder &MIB,
                                          const GlobalValue *GV, bool IsSBREL) {
    assert((MIB->getOpcode() == ARM::LDRi12 ||
            MIB->getOpcode() == ARM::t2LDRpci) &&
           "Unsupported instruction");
    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        // For SB relative entries we need a target-specific constant pool.
        // Otherwise, just use a regular constant pool entry.
        IsSBREL
            ? ConstPool->getConstantPoolIndex(
                  ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment)
            : ConstPool->getConstantPoolIndex(GV, Alignment);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(MF.getMachineMemOperand(
            MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
            Size, Alignment));
    if (MIB->getOpcode() == ARM::LDRi12)
      MIB.addImm(0);
    MIB.add(predOps(ARMCC::AL));
  };

  auto addGOTMemOperand = [this, &MF, Alignment](MachineInstrBuilder &MIB) {
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad,
        TM.getProgramPointerSize(), Alignment));
  };

  if (TM.isPositionIndependent()) {
    bool Indirect = STI.isGVIndirectSymbol(GV);

    // For ARM mode, we have different pseudoinstructions for direct accesses
    // and indirect accesses, and the ones for indirect accesses include the
    // load from GOT. For Thumb mode, we use the same pseudoinstruction for both
    // direct and indirect accesses, and we need to manually generate the load
    // from GOT.
    bool UseOpcodeThatLoads = Indirect && !STI.isThumb();

    // FIXME: Taking advantage of MOVT for ELF is pretty involved, so we don't
    // support it yet. See PR28229.
    unsigned Opc =
        UseMovt && !STI.isTargetELF()
            ? (UseOpcodeThatLoads ? (unsigned)ARM::MOV_ga_pcrel_ldr
                                  : Opcodes.MOV_ga_pcrel)
            : (UseOpcodeThatLoads ? (unsigned)ARM::LDRLIT_ga_pcrel_ldr
                                  : Opcodes.LDRLIT_ga_pcrel);
    MIB->setDesc(TII.get(Opc));

    int TargetFlags = ARMII::MO_NO_FLAG;
    if (STI.isTargetDarwin())
      TargetFlags |= ARMII::MO_NONLAZY;
    if (STI.isGVInGOT(GV))
      TargetFlags |= ARMII::MO_GOT;
    MIB->getOperand(1).setTargetFlags(TargetFlags);

    if (Indirect) {
      if (!UseOpcodeThatLoads) {
        auto ResultReg = MIB->getOperand(0).getReg();
        auto AddressReg = MRI.createVirtualRegister(&ARM::GPRRegClass);

        MIB->getOperand(0).setReg(AddressReg);

        auto InsertBefore = std::next(MIB->getIterator());
        auto MIBLoad = BuildMI(MBB, InsertBefore, MIB->getDebugLoc(),
                               TII.get(Opcodes.LOAD32))
                           .addDef(ResultReg)
                           .addReg(AddressReg)
                           .addImm(0)
                           .add(predOps(ARMCC::AL));
        addGOTMemOperand(MIBLoad);

        if (!constrainSelectedInstRegOperands(*MIBLoad, TII, TRI, RBI))
          return false;
      } else {
        addGOTMemOperand(MIB);
      }
    }

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  bool isReadOnly = STI.getTargetLowering()->isReadOnly(GV);
  if (STI.isROPI() && isReadOnly) {
    unsigned Opc = UseMovt ? Opcodes.MOV_ga_pcrel : Opcodes.LDRLIT_ga_pcrel;
    MIB->setDesc(TII.get(Opc));
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }
  if (STI.isRWPI() && !isReadOnly) {
    auto Offset = MRI.createVirtualRegister(&ARM::GPRRegClass);
    MachineInstrBuilder OffsetMIB;
    if (UseMovt) {
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.MOVi32imm), Offset);
      OffsetMIB.addGlobalAddress(GV, /*Offset*/ 0, ARMII::MO_SBREL);
    } else {
      // Load the offset from the constant pool.
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.ConstPoolLoad), Offset);
      addOpsForConstantPoolLoad(OffsetMIB, GV, /*IsSBREL*/ true);
    }
    if (!constrainSelectedInstRegOperands(*OffsetMIB, TII, TRI, RBI))
      return false;

    // Add the offset to the SB register.
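    // For RWPI the static base is R9, so the access ends up as roughly
    //   ADDrr %dst, R9, %offset
    // with %offset holding the SB-relative address of the global.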
    MIB->setDesc(TII.get(Opcodes.ADDrr));
    MIB->RemoveOperand(1);
    MIB.addReg(ARM::R9) // FIXME: don't hardcode R9
        .addReg(Offset)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  if (STI.isTargetELF()) {
    if (UseMovt) {
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    } else {
      // Load the global's address from the constant pool.
      MIB->setDesc(TII.get(Opcodes.ConstPoolLoad));
      MIB->RemoveOperand(1);
      addOpsForConstantPoolLoad(MIB, GV, /*IsSBREL*/ false);
    }
  } else if (STI.isTargetMachO()) {
    if (UseMovt)
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    else
      MIB->setDesc(TII.get(Opcodes.LDRLIT_ga_abs));
  } else {
    LLVM_DEBUG(dbgs() << "Object format not supported yet\n");
    return false;
  }

  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  auto &MBB = *MIB->getParent();
  auto InsertBefore = std::next(MIB->getIterator());
  auto &DbgLoc = MIB->getDebugLoc();

  // Compare the condition to 1.
  auto CondReg = MIB->getOperand(1).getReg();
  assert(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.TSTri))
                  .addUse(CondReg)
                  .addImm(1)
                  .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Move a value into the result register based on the result of the
  // comparison.
  auto ResReg = MIB->getOperand(0).getReg();
  auto TrueReg = MIB->getOperand(2).getReg();
  auto FalseReg = MIB->getOperand(3).getReg();
  assert(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) &&
         validOpRegPair(MRI, TrueReg, FalseReg, 32, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.MOVCCr))
                   .addDef(ResReg)
                   .addUse(TrueReg)
                   .addUse(FalseReg)
                   .add(predOps(ARMCC::EQ, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  MIB->eraseFromParent();
  return true;
}

bool ARMInstructionSelector::selectShift(unsigned ShiftOpc,
                                         MachineInstrBuilder &MIB) const {
  assert(!STI.isThumb() && "Unsupported subtarget");
  MIB->setDesc(TII.get(ARM::MOVsr));
  MIB.addImm(ShiftOpc);
  MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

void ARMInstructionSelector::renderVFPF32Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP32Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}

void ARMInstructionSelector::renderVFPF64Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP64Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}

bool ARMInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  auto &MBB = *I.getParent();
  auto &MF = *MBB.getParent();
  auto &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  using namespace TargetOpcode;

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstrBuilder MIB{MF, I};
  bool isSExt = false;

  switch (I.getOpcode()) {
  case G_SEXT:
    isSExt = true;
    LLVM_FALLTHROUGH;
  case G_ZEXT: {
    assert(MRI.getType(I.getOperand(0).getReg()).getSizeInBits() <= 32 &&
           "Unsupported destination size for extension");

    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    unsigned SrcSize = SrcTy.getSizeInBits();
    switch (SrcSize) {
    case 1: {
      // ZExt boils down to & 0x1; for SExt we also subtract that from 0
      I.setDesc(TII.get(Opcodes.AND));
      MIB.addImm(1).add(predOps(ARMCC::AL)).add(condCodeOp());

      if (isSExt) {
        Register SExtResult = I.getOperand(0).getReg();

        // Use a new virtual register for the result of the AND
        Register AndResult = MRI.createVirtualRegister(&ARM::GPRRegClass);
        I.getOperand(0).setReg(AndResult);

        auto InsertBefore = std::next(I.getIterator());
        auto SubI =
            BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(Opcodes.RSB))
                .addDef(SExtResult)
                .addUse(AndResult)
                .addImm(0)
                .add(predOps(ARMCC::AL))
                .add(condCodeOp());
        if (!constrainSelectedInstRegOperands(*SubI, TII, TRI, RBI))
          return false;
      }
      break;
    }
    case 8:
    case 16: {
      unsigned NewOpc = selectSimpleExtOpc(I.getOpcode(), SrcSize);
      if (NewOpc == I.getOpcode())
        return false;
      I.setDesc(TII.get(NewOpc));
      MIB.addImm(0).add(predOps(ARMCC::AL));
      break;
    }
    default:
      LLVM_DEBUG(dbgs() << "Unsupported source size for extension");
      return false;
    }
    break;
  }
  case G_ANYEXT:
  case G_TRUNC: {
    // The high bits are undefined, so there's nothing special to do, just
    // treat it as a copy.
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() == ARM::FPRRegBankID) {
      // This should only happen in the obscure case where we have put a 64-bit
      // integer into a D register. Get it out of there and keep only the
      // interesting part.
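      // We do that with a VMOVRRD: the low half of the D register goes into
      // DstReg and the high half goes into a scratch GPR that is simply unused.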
      assert(I.getOpcode() == G_TRUNC && "Unsupported operand for G_ANYEXT");
      assert(DstRegBank.getID() == ARM::GPRRegBankID &&
             "Unsupported combination of register banks");
      assert(MRI.getType(SrcReg).getSizeInBits() == 64 && "Unsupported size");
      assert(MRI.getType(DstReg).getSizeInBits() <= 32 && "Unsupported size");

      Register IgnoredBits = MRI.createVirtualRegister(&ARM::GPRRegClass);
      auto InsertBefore = std::next(I.getIterator());
      auto MovI =
          BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(ARM::VMOVRRD))
              .addDef(DstReg)
              .addDef(IgnoredBits)
              .addUse(SrcReg)
              .add(predOps(ARMCC::AL));
      if (!constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI))
        return false;

      MIB->eraseFromParent();
      return true;
    }

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_ANYEXT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_TRUNC/G_ANYEXT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_CONSTANT: {
    if (!MRI.getType(I.getOperand(0).getReg()).isPointer()) {
      // Non-pointer constants should be handled by TableGen.
      LLVM_DEBUG(dbgs() << "Unsupported constant type\n");
      return false;
    }

    auto &Val = I.getOperand(1);
    if (Val.isCImm()) {
      if (!Val.getCImm()->isZero()) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
      Val.ChangeToImmediate(0);
    } else {
      assert(Val.isImm() && "Unexpected operand for G_CONSTANT");
      if (Val.getImm() != 0) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
    }

    assert(!STI.isThumb() && "Unsupported subtarget");
    I.setDesc(TII.get(ARM::MOVi));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  }
  case G_FCONSTANT: {
    // Load from constant pool
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits() / 8;
    unsigned Alignment = Size;

    assert((Size == 4 || Size == 8) && "Unsupported FP constant type");
    auto LoadOpcode = Size == 4 ? ARM::VLDRS : ARM::VLDRD;

    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        ConstPool->getConstantPoolIndex(I.getOperand(1).getFPImm(), Alignment);
    MIB->setDesc(TII.get(LoadOpcode));
    MIB->RemoveOperand(1);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(
            MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
                                    MachineMemOperand::MOLoad, Size, Alignment))
        .addImm(0)
        .add(predOps(ARMCC::AL));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs()
          << "G_INTTOPTR/G_PTRTOINT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(
          dbgs() << "G_INTTOPTR/G_PTRTOINT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_SELECT:
    return selectSelect(MIB, MRI);
  case G_ICMP: {
    CmpConstants Helper(Opcodes.CMPrr, ARM::INSTRUCTION_LIST_END,
                        Opcodes.MOVCCi, ARM::GPRRegBankID, 32);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_FCMP: {
    assert(STI.hasVFP2Base() && "Can't select fcmp without VFP");

    Register OpReg = I.getOperand(2).getReg();
    unsigned Size = MRI.getType(OpReg).getSizeInBits();

    if (Size == 64 && !STI.hasFP64()) {
      LLVM_DEBUG(dbgs() << "Subtarget only supports single precision");
      return false;
    }
    if (Size != 32 && Size != 64) {
      LLVM_DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
      return false;
    }

    CmpConstants Helper(Size == 32 ? ARM::VCMPS : ARM::VCMPD, ARM::FMSTAT,
                        Opcodes.MOVCCi, ARM::FPRRegBankID, Size);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_LSHR:
    return selectShift(ARM_AM::ShiftOpc::lsr, MIB);
  case G_ASHR:
    return selectShift(ARM_AM::ShiftOpc::asr, MIB);
  case G_SHL:
    return selectShift(ARM_AM::ShiftOpc::lsl, MIB);
  case G_PTR_ADD:
    I.setDesc(TII.get(Opcodes.ADDrr));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_FRAME_INDEX:
    // Add 0 to the given frame index and hope it will eventually be folded into
    // the user(s).
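    // In other words, G_FRAME_INDEX becomes roughly "ADDri <fi>, 0", which
    // later passes can hopefully fold into the memory access that uses it.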
    I.setDesc(TII.get(Opcodes.ADDri));
    MIB.addImm(0).add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_GLOBAL_VALUE:
    return selectGlobal(MIB, MRI);
  case G_STORE:
  case G_LOAD: {
    const auto &MemOp = **I.memoperands_begin();
    if (MemOp.isAtomic()) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    Register Reg = I.getOperand(0).getReg();
    unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();

    LLT ValTy = MRI.getType(Reg);
    const auto ValSize = ValTy.getSizeInBits();

    assert((ValSize != 64 || STI.hasVFP2Base()) &&
           "Don't know how to load/store 64-bit value without VFP");

    const auto NewOpc = selectLoadStoreOpCode(I.getOpcode(), RegBank, ValSize);
    if (NewOpc == G_LOAD || NewOpc == G_STORE)
      return false;

    if (ValSize == 1 && NewOpc == Opcodes.STORE8) {
      // Before storing a 1-bit value, make sure to clear out any unneeded bits.
      Register OriginalValue = I.getOperand(0).getReg();

      Register ValueToStore = MRI.createVirtualRegister(&ARM::GPRRegClass);
      I.getOperand(0).setReg(ValueToStore);

      auto InsertBefore = I.getIterator();
      auto AndI = BuildMI(MBB, InsertBefore, I.getDebugLoc(),
                          TII.get(Opcodes.AND))
                      .addDef(ValueToStore)
                      .addUse(OriginalValue)
                      .addImm(1)
                      .add(predOps(ARMCC::AL))
                      .add(condCodeOp());
      if (!constrainSelectedInstRegOperands(*AndI, TII, TRI, RBI))
        return false;
    }

    I.setDesc(TII.get(NewOpc));

    if (NewOpc == ARM::LDRH || NewOpc == ARM::STRH)
      // LDRH has a funny addressing mode (there's already a FIXME for it).
      MIB.addReg(0);
    MIB.addImm(0).add(predOps(ARMCC::AL));
    break;
  }
  case G_MERGE_VALUES: {
    if (!selectMergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_UNMERGE_VALUES: {
    if (!selectUnmergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_BRCOND: {
    if (!validReg(MRI, I.getOperand(0).getReg(), 1, ARM::GPRRegBankID)) {
      LLVM_DEBUG(dbgs() << "Unsupported condition register for G_BRCOND");
      return false;
    }

    // Set the flags.
    auto Test =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.TSTri))
            .addReg(I.getOperand(0).getReg())
            .addImm(1)
            .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*Test, TII, TRI, RBI))
      return false;

    // Branch conditionally.
    auto Branch =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.Bcc))
            .add(I.getOperand(1))
            .add(predOps(ARMCC::NE, ARM::CPSR));
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;
    I.eraseFromParent();
    return true;
  }
  case G_PHI: {
    I.setDesc(TII.get(PHI));

    Register DstReg = I.getOperand(0).getReg();
    const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);
    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      break;
    }

    return true;
  }
  default:
    return false;
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}