//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (TargetRegisterInfo::isPhysicalRegister(DstReg))
    return true;

  const RegisterBank *RegBank = RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  if (RegBank->getID() == Mips::FPRBRegBankID) {
    if (DstSize == 32)
      RC = &Mips::FGR32RegClass;
    else if (DstSize == 64)
      RC = STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
  // ORi zero-extends the immediate. Used for values with zeros in the high
  // 16 bits.
  if (Imm.getHiBits(16).isNullValue()) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // LUi places the immediate in the high 16 bits and sets the low 16 bits to
  // zero.
  if (Imm.getLoBits(16).isNullValue()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign-extends the immediate. Used for values with ones in the high
  // 17 bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with a single immediate instruction.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}

/// Returning Opc indicates that we failed to select a MIPS instruction opcode.
static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned MemSizeInBytes) {
  if (Opc == TargetOpcode::G_STORE)
    switch (MemSizeInBytes) {
    case 4:
      return Mips::SW;
    case 2:
      return Mips::SH;
    case 1:
      return Mips::SB;
    default:
      return Opc;
    }
  else
    // An unspecified extending load is selected as a zero-extending load.
    switch (MemSizeInBytes) {
    case 4:
      return Mips::LW;
    case 2:
      return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
    case 1:
      return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
    default:
      return Opc;
    }
}

bool MipsInstructionSelector::select(MachineInstr &I,
                                     CodeGenCoverage &CoverageInfo) const {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  if (I.getOpcode() == Mips::G_MUL) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  if (selectImpl(I, CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GEP: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRCOND: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
             .add(I.getOperand(0))
             .addUse(Mips::ZERO)
             .add(I.getOperand(1));
    break;
  }
  case G_PHI: {
    const Register DestReg = I.getOperand(0).getReg();
    const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();

    if (DestRegBank != Mips::GPRBRegBankID || OpSize != 32)
      return false;

    const TargetRegisterClass *DefRC = &Mips::GPR32RegClass;
    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    const Register DestReg = I.getOperand(0).getReg();
    const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
    const unsigned OpMemSizeInBytes = (*I.memoperands_begin())->getSize();

    if (DestRegBank != Mips::GPRBRegBankID || OpSize != 32)
      return false;

    const unsigned NewOpc =
        selectLoadStoreOpCode(I.getOpcode(), OpMemSizeInBytes);
    if (NewOpc == I.getOpcode())
      return false;

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0)
             .addMemOperand(*I.memoperands_begin());
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
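    // Division and remainder both go through a pseudo divide that defines the
    // HI/LO accumulator pair (quotient in LO, remainder in HI); the requested
    // half is then copied out with PseudoMFLO or PseudoMFHI below.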
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    unsigned ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GLOBAL_VALUE: {
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel())
                                .addGlobalAddress(GVal);
      // Global values that don't have local linkage are handled differently
      // when they are part of a call sequence. MipsCallLowering::lowerCall
      // creates the G_GLOBAL_VALUE instruction as part of the call sequence
      // and adds the MO_GOT_CALL flag when the callee doesn't have local
      // linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, 4));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
  case G_ICMP: {
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FCMP: {
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // The default compare result in the GPR register will be `true`.
    // We move `false` (Mips::ZERO) into the GPR result when fcmp yields false,
    // using MOVF_I. When the original predicate (Cond) is the logically
    // negated MipsFCMPCondCode, the result is inverted, i.e. MOVT_I is used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    unsigned TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
                                                   MipsSubtarget &Subtarget,
                                                   MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm