//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  const TargetRegisterClass *
  getRegClassForTypeOnBank(unsigned OpSize, const RegisterBank &RB,
                           const RegisterBankInfo &RBI) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (Register::isPhysicalRegister(DstReg))
    return true;

  const RegisterBank *RegBank = RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  if (RegBank->getID() == Mips::FPRBRegBankID) {
    if (DstSize == 32)
      RC = &Mips::FGR32RegClass;
    else if (DstSize == 64)
      RC = STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

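// Map a (size, register bank) pair to a register class: the GPR bank always
// uses GPR32, while the FPR bank uses FGR32 for 32-bit values and FGR64 or
// AFGR64 (paired 32-bit FPRs) for 64-bit values, depending on the FPU
// configuration.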
const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
    unsigned OpSize, const RegisterBank &RB,
    const RegisterBankInfo &RBI) const {
  if (RB.getID() == Mips::GPRBRegBankID)
    return &Mips::GPR32RegClass;

  if (RB.getID() == Mips::FPRBRegBankID)
    return OpSize == 32
               ? &Mips::FGR32RegClass
               : STI.hasMips32r6() || STI.isFP64bit() ? &Mips::FGR64RegClass
                                                      : &Mips::AFGR64RegClass;

  llvm_unreachable("getRegClassForTypeOnBank can't find register class.");
  return nullptr;
}

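// Materialize a 32-bit immediate into DestReg with at most two instructions:
// a single ORi, LUi or ADDiu when the value fits that instruction's immediate
// encoding, and an LUi/ORi pair otherwise. For example (illustrative value,
// not from the original source): 0x00012345 becomes LUi 0x0001 followed by
// ORi 0x2345.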
bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
  // ORi zero-extends its immediate. Used for values with zeros in the high
  // 16 bits.
  if (Imm.getHiBits(16).isNullValue()) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // LUi places the immediate in the high 16 bits and sets the low 16 bits to
  // zero.
  if (Imm.getLoBits(16).isNullValue()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign-extends its immediate. Used for values with ones in the high
  // 17 bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with a single immediate instruction
  // are built with an LUi/ORi pair.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}

/// Returning the incoming Opc unchanged indicates that no MIPS load/store
/// opcode could be selected.
static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned MemSizeInBytes,
                                      unsigned RegBank, bool isFP64) {
  bool isStore = Opc == TargetOpcode::G_STORE;
  if (RegBank == Mips::GPRBRegBankID) {
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // An unspecified extending load (G_LOAD) is selected as a
      // zero-extending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (RegBank == Mips::FPRBRegBankID) {
    switch (MemSizeInBytes) {
    case 4:
      return isStore ? Mips::SWC1 : Mips::LWC1;
    case 8:
      if (isFP64)
        return isStore ? Mips::SDC164 : Mips::LDC164;
      else
        return isStore ? Mips::SDC1 : Mips::LDC1;
    default:
      return Opc;
    }
  }
  return Opc;
}

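// Entry point for instruction selection: COPY and G_MUL are handled up front,
// then the TableGen'erated selectImpl is tried, and the remaining generic
// opcodes fall through to the switch below.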
bool MipsInstructionSelector::select(MachineInstr &I,
                                     CodeGenCoverage &CoverageInfo) const {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  if (I.getOpcode() == Mips::G_MUL) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  if (selectImpl(I, CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GEP: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRCOND: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
             .add(I.getOperand(0))
             .addUse(Mips::ZERO)
             .add(I.getOperand(1));
    break;
  }
  case G_PHI: {
    const Register DestReg = I.getOperand(0).getReg();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();

    const TargetRegisterClass *DefRC = nullptr;
    if (Register::isPhysicalRegister(DestReg))
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(OpSize,
                                       *RBI.getRegBank(DestReg, MRI, TRI), RBI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    const Register DestReg = I.getOperand(0).getReg();
    const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
    const unsigned OpMemSizeInBytes = (*I.memoperands_begin())->getSize();

    if (DestRegBank == Mips::GPRBRegBankID && OpSize != 32)
      return false;

    if (DestRegBank == Mips::FPRBRegBankID && OpSize != 32 && OpSize != 64)
      return false;

    const unsigned NewOpc = selectLoadStoreOpCode(
        I.getOpcode(), OpMemSizeInBytes, DestRegBank, STI.isFP64bit());
    if (NewOpc == I.getOpcode())
      return false;

    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_GEP + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_GEP %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_GEP) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(*I.memoperands_begin());
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
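    // MOVN_I_I implements "move on not zero": the destination receives the
    // true-value operand (G_SELECT operand 2) when the condition register
    // (operand 1) is nonzero, and otherwise keeps the false-value operand
    // (operand 3), which explains the operand order below.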
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
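  // G_FPTOSI: truncate the FP value to a 32-bit integer in an FPR
  // (TRUNC.W.S / TRUNC.W.D), then move the result into a GPR with MFC1.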
  case G_FPTOSI: {
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    unsigned ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GLOBAL_VALUE: {
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel())
                                .addGlobalAddress(GVal);
      // Global values that don't have local linkage are handled differently
      // when they are part of a call sequence. MipsCallLowering::lowerCall
      // creates the G_GLOBAL_VALUE instruction as part of the call sequence
      // and adds the MO_GOT_CALL flag when the callee doesn't have local
      // linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, 4));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
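  // G_ICMP: MIPS has no integer compare that writes a boolean directly into a
  // GPR, so each predicate is lowered to a short SLT/SLTu/SLTiu sequence, with
  // XOR/XORi providing equality tests and logical negation (e.g. "a == b"
  // becomes "(a ^ b) < 1", compared unsigned).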
  case G_ICMP: {
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
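  // G_FCMP: the FP compare sets condition flag FCC0; the boolean result is
  // produced in a GPR by materializing 1 and conditionally overwriting it
  // with zero via MOVF_I or MOVT_I.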
  case G_FCMP: {
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // The default compare result in the GPR will be `true` (1). We move
    // `false` (Mips::ZERO) into the GPR result when the fcmp yields false,
    // using MOVF_I. When the original predicate (Cond) is the logical negation
    // of MipsFCMPCondCode, the result is inverted, i.e. MOVT_I is used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    unsigned TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
                                                   MipsSubtarget &Subtarget,
                                                   MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm