//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

/// Hand-written GlobalISel instruction selector for MIPS. select() first
/// handles a few special cases, then defers to the TableGen-generated
/// selectImpl(), and finally falls back to manual selection of the generic
/// opcodes TableGen does not yet cover.
class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// Generated by TableGen (GET_GLOBALISEL_IMPL below).
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  const TargetRegisterClass *
  getRegClassForTypeOnBank(Register Reg, MachineRegisterInfo &MRI) const;
  unsigned selectLoadStoreOpCode(MachineInstr &I,
                                 MachineRegisterInfo &MRI) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

/// Returns true if \p Reg was assigned to the general-purpose register bank.
bool MipsInstructionSelector::isRegInGprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::GPRBRegBankID;
}

/// Returns true if \p Reg was assigned to the floating-point register bank.
bool MipsInstructionSelector::isRegInFprb(Register Reg,
                                          MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::FPRBRegBankID;
}

/// Select a COPY by constraining its generic virtual destination register to a
/// concrete register class chosen from its register bank and LLT. Copies into
/// physical registers need no constraining and trivially succeed.
bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (Register::isPhysicalRegister(DstReg))
    return true;

  const TargetRegisterClass *RC = getRegClassForTypeOnBank(DstReg, MRI);
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

/// Map (register bank, LLT) of \p Reg to a concrete register class:
/// 32-bit scalars/pointers on the GPR bank -> GPR32; scalars on the FPR bank
/// -> FGR32 or, for 64-bit values, FGR64/AFGR64 depending on the FPU mode.
/// Any other combination is unsupported and asserts.
const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
    Register Reg, MachineRegisterInfo &MRI) const {
  const LLT Ty = MRI.getType(Reg);
  const unsigned TySize = Ty.getSizeInBits();

  if (isRegInGprb(Reg, MRI)) {
    assert((Ty.isScalar() || Ty.isPointer()) && TySize == 32 &&
           "Register class not available for LLT, register bank combination");
    return &Mips::GPR32RegClass;
  }

  if (isRegInFprb(Reg, MRI)) {
    if (Ty.isScalar()) {
      assert((TySize == 32 || TySize == 64) &&
             "Register class not available for LLT, register bank combination");
      if (TySize == 32)
        return &Mips::FGR32RegClass;
      return STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    }
  }

  llvm_unreachable("Unsupported register bank.");
}

/// Materialize the 32-bit immediate \p Imm into \p DestReg using at most two
/// instructions: a single ORi/LUi/ADDiu when the value fits one of their
/// immediate forms, otherwise the classic LUi + ORi pair.
bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
  // Ori zero extends immediate. Used for values with zeros in high 16 bits.
  if (Imm.getHiBits(16).isNullValue()) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Lui places immediate in high 16 bits and sets low 16 bits to zero.
  if (Imm.getLoBits(16).isNullValue()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with single immediate instruction.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}

/// Pick the MIPS load/store opcode for generic G_LOAD/G_STORE/G_ZEXTLOAD/
/// G_SEXTLOAD based on the value's register bank, LLT, and the access size
/// from the memory operand.
/// When I.getOpcode() is returned, we failed to select MIPS instruction opcode.
unsigned
MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
                                               MachineRegisterInfo &MRI) const {
  const Register ValueReg = I.getOperand(0).getReg();
  const LLT Ty = MRI.getType(ValueReg);
  const unsigned TySize = Ty.getSizeInBits();
  const unsigned MemSizeInBytes = (*I.memoperands_begin())->getSize();
  unsigned Opc = I.getOpcode();
  const bool isStore = Opc == TargetOpcode::G_STORE;

  if (isRegInGprb(ValueReg, MRI)) {
    assert(((Ty.isScalar() && TySize == 32) ||
            (Ty.isPointer() && TySize == 32 && MemSizeInBytes == 4)) &&
           "Unsupported register bank, LLT, MemSizeInBytes combination");
    (void)TySize;
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // Unspecified extending load is selected into zeroExtending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (isRegInFprb(ValueReg, MRI)) {
    if (Ty.isScalar()) {
      assert(((TySize == 32 && MemSizeInBytes == 4) ||
              (TySize == 64 && MemSizeInBytes == 8)) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");

      if (MemSizeInBytes == 4)
        return isStore ? Mips::SWC1 : Mips::LWC1;

      if (STI.isFP64bit())
        return isStore ? Mips::SDC164 : Mips::LDC164;
      return isStore ? Mips::SDC1 : Mips::LDC1;
    }

    if (Ty.isVector()) {
      assert(STI.hasMSA() && "Vector instructions require target with MSA.");
      assert((TySize == 128 && MemSizeInBytes == 16) &&
             "Unsupported register bank, LLT, MemSizeInBytes combination");
      // MSA loads/stores are selected by element width.
      switch (Ty.getElementType().getSizeInBits()) {
      case 8:
        return isStore ? Mips::ST_B : Mips::LD_B;
      case 16:
        return isStore ? Mips::ST_H : Mips::LD_H;
      case 32:
        return isStore ? Mips::ST_W : Mips::LD_W;
      case 64:
        return isStore ? Mips::ST_D : Mips::LD_D;
      default:
        return Opc;
      }
    }
  }

  // Sentinel: caller detects failure by comparing against the generic opcode.
  return Opc;
}

bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Already-selected (non-generic) instructions only need their COPYs
  // constrained; everything else is left untouched.
  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  // G_MUL on the GPR bank is handled before selectImpl so we can mark the
  // extra defs added by the MUL descriptor (operands 3 and 4, presumably the
  // implicit HI/LO-style defs) as dead, since only operand 0 is used.
  if (I.getOpcode() == Mips::G_MUL &&
      isRegInGprb(I.getOperand(0).getReg(), MRI)) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  // Try the TableGen-generated patterns first.
  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
    // Unsigned multiply into the HI/LO accumulator, then read back HI.
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_PTR_ADD: {
    // Pointer arithmetic is plain 32-bit addition.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    // Pointers and 32-bit integers share a representation: select as a COPY.
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    // Frame address = frame-index operand + 0.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRCOND: {
    // Branch to the target block when the condition register is non-zero.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
             .add(I.getOperand(0))
             .addUse(Mips::ZERO)
             .add(I.getOperand(1));
    break;
  }
  case G_BRJT: {
    // Jump-table branch: scale the index, add the table base, load the target
    // address (plus a GOT-relative fixup under PIC), and branch indirectly.
    unsigned EntrySize =
        MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, 4));
    if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      // Under PIC the loaded entry is GOT-relative: redirect the load into a
      // temp and add the global base register to form the absolute address.
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
                               .addUse(MF.getInfo<MipsFunctionInfo>()
                                           ->getGlobalBaseRegForGlobalISel());
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    MachineInstr *Branch =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
            .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    // Keep the PHI but constrain its generic result to a concrete class so
    // later passes see a fully-selected instruction.
    const Register DestReg = I.getOperand(0).getReg();

    const TargetRegisterClass *DefRC = nullptr;
    if (Register::isPhysicalRegister(DestReg))
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(DestReg, MRI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
    if (NewOpc == I.getOpcode())
      return false;

    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_PTR_ADD + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_PTR_ADD) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(*I.memoperands_begin());
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
    // Divide into the HI/LO accumulator, then read LO for the quotient
    // (G_*DIV) or HI for the remainder (G_*REM).
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    // MOVN (move-if-not-zero): take the true value (operand 2) when the test
    // (operand 1) is non-zero, the false value (operand 3) otherwise.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_IMPLICIT_DEF: {
    Register Dst = I.getOperand(0).getReg();
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .addDef(Dst);

    // Set class based on register bank, there can be fpr and gpr implicit def.
    MRI.setRegClass(Dst, getRegClassForTypeOnBank(Dst, MRI));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
    // Build the bit pattern in GPR(s), then move it into the FPU register:
    // MTC1 for f32, BuildPairF64 (takes lo then hi) for f64.
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
    // Truncate in the FPU (TRUNC_W_*), then move the i32 result to a GPR.
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GLOBAL_VALUE: {
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      // PIC: load the address from the GOT via the global base register.
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel())
                                .addGlobalAddress(GVal);
      // Global Values that don't have local linkage are handled differently
      // when they are part of call sequence. MipsCallLowering::lowerCall
      // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
      // MO_GOT_CALL flag when Callee doesn't have local linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, 4));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        // Local symbols get a page address from the GOT plus a low-part
        // offset: redirect the load into a temp and add MO_ABS_LO.
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      // Non-PIC: classic LUi(hi) + ADDiu(lo) absolute address.
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
  case G_JUMP_TABLE: {
    if (MF.getTarget().isPositionIndependent()) {
      // PIC: table address comes from the GOT.
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               .addReg(MF.getInfo<MipsFunctionInfo>()
                           ->getGlobalBaseRegForGlobalISel())
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               .addMemOperand(
                   MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                           MachineMemOperand::MOLoad, 4, 4));
    } else {
      // Non-PIC: only the high part here; G_BRJT's LW carries MO_ABS_LO.
      MI =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(I.getOperand(0).getReg())
              .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
    }
    break;
  }
  case G_ICMP: {
    // Lower each predicate to one or two SLT-family instructions; RHS holds
    // an immediate (not a register) when the opcode is SLTiu or XORi.
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS >  RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS <  RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FCMP: {
    // Each pair of predicates below maps to the same FPU condition code; the
    // second member of the pair is its logical negation, resolved later by
    // choosing MOVT_I vs MOVF_I.
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
               I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // Default compare result in gpr register will be `true`.
    // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
    // using MOVF_I. When orignal predicate (Cond) is logically negated
    // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    // TrueInReg already has a concrete class (GPR32), so no constraining is
    // needed for this ADDiu.
    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
  case G_VASTART: {
    // Store the address of the varargs frame slot through the pointer operand.
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  // Cases that only built `MI` fall through to the common epilogue: remove
  // the generic instruction and constrain the selected one.
  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

namespace llvm {
/// Factory used by the MIPS target to register its GlobalISel selector.
InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
                                                   MipsSubtarget &Subtarget,
                                                   MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm