//===-- MipsSEInstrInfo.cpp - Mips32/64 Instruction Information -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEInstrInfo.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

MipsSEInstrInfo::MipsSEInstrInfo(const MipsSubtarget &STI)
    : MipsInstrInfo(STI, STI.getRelocationModel() == Reloc::PIC_ ? Mips::B
                                                                 : Mips::J),
      RI() {}

const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
  return RI;
}

/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned MipsSEInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  unsigned Opc = MI->getOpcode();

  if ((Opc == Mips::LW) || (Opc == Mips::LD) ||
      (Opc == Mips::LWC1) || (Opc == Mips::LDC1) || (Opc == Mips::LDC164)) {
    if ((MI->getOperand(1).isFI()) &&  // is a stack slot
        (MI->getOperand(2).isImm()) && // the imm is zero
        (isZeroImm(MI->getOperand(2)))) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }

  return 0;
}

/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stack slot being stored
/// to. If not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned MipsSEInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                             int &FrameIndex) const {
  unsigned Opc = MI->getOpcode();

  if ((Opc == Mips::SW) || (Opc == Mips::SD) ||
      (Opc == Mips::SWC1) || (Opc == Mips::SDC1) || (Opc == Mips::SDC164)) {
    if ((MI->getOperand(1).isFI()) &&  // is a stack slot
        (MI->getOperand(2).isImm()) && // the imm is zero
        (isZeroImm(MI->getOperand(2)))) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }
  return 0;
}

void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I, DebugLoc DL,
                                  unsigned DestReg, unsigned SrcReg,
                                  bool KillSrc) const {
  unsigned Opc = 0, ZeroReg = 0;
  bool isMicroMips = Subtarget.inMicroMipsMode();

  if (Mips::GPR32RegClass.contains(DestReg)) { // Copy to CPU Reg.
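    // GPR-to-GPR moves become OR with $zero (or MOVE16 in microMIPS); copies
    // out of HI/LO, FPU, FPU-control, DSP, and MSA-control registers each use
    // the dedicated move-from instruction selected below.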
    if (Mips::GPR32RegClass.contains(SrcReg)) {
      if (isMicroMips)
        Opc = Mips::MOVE16_MM;
      else
        Opc = Mips::OR, ZeroReg = Mips::ZERO;
    } else if (Mips::CCRRegClass.contains(SrcReg))
      Opc = Mips::CFC1;
    else if (Mips::FGR32RegClass.contains(SrcReg))
      Opc = Mips::MFC1;
    else if (Mips::HI32RegClass.contains(SrcReg)) {
      Opc = isMicroMips ? Mips::MFHI16_MM : Mips::MFHI;
      SrcReg = 0;
    } else if (Mips::LO32RegClass.contains(SrcReg)) {
      Opc = isMicroMips ? Mips::MFLO16_MM : Mips::MFLO;
      SrcReg = 0;
    } else if (Mips::HI32DSPRegClass.contains(SrcReg))
      Opc = Mips::MFHI_DSP;
    else if (Mips::LO32DSPRegClass.contains(SrcReg))
      Opc = Mips::MFLO_DSP;
    else if (Mips::DSPCCRegClass.contains(SrcReg)) {
      BuildMI(MBB, I, DL, get(Mips::RDDSP), DestReg).addImm(1 << 4)
        .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      return;
    }
    else if (Mips::MSACtrlRegClass.contains(SrcReg))
      Opc = Mips::CFCMSA;
  }
  else if (Mips::GPR32RegClass.contains(SrcReg)) { // Copy from CPU Reg.
    if (Mips::CCRRegClass.contains(DestReg))
      Opc = Mips::CTC1;
    else if (Mips::FGR32RegClass.contains(DestReg))
      Opc = Mips::MTC1;
    else if (Mips::HI32RegClass.contains(DestReg))
      Opc = Mips::MTHI, DestReg = 0;
    else if (Mips::LO32RegClass.contains(DestReg))
      Opc = Mips::MTLO, DestReg = 0;
    else if (Mips::HI32DSPRegClass.contains(DestReg))
      Opc = Mips::MTHI_DSP;
    else if (Mips::LO32DSPRegClass.contains(DestReg))
      Opc = Mips::MTLO_DSP;
    else if (Mips::DSPCCRegClass.contains(DestReg)) {
      BuildMI(MBB, I, DL, get(Mips::WRDSP))
        .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1 << 4)
        .addReg(DestReg, RegState::ImplicitDefine);
      return;
    }
    else if (Mips::MSACtrlRegClass.contains(DestReg))
      Opc = Mips::CTCMSA;
  }
  else if (Mips::FGR32RegClass.contains(DestReg, SrcReg))
    Opc = Mips::FMOV_S;
  else if (Mips::AFGR64RegClass.contains(DestReg, SrcReg))
    Opc = Mips::FMOV_D32;
  else if (Mips::FGR64RegClass.contains(DestReg, SrcReg))
    Opc = Mips::FMOV_D64;
  else if (Mips::GPR64RegClass.contains(DestReg)) { // Copy to CPU64 Reg.
    if (Mips::GPR64RegClass.contains(SrcReg))
      Opc = Mips::OR64, ZeroReg = Mips::ZERO_64;
    else if (Mips::HI64RegClass.contains(SrcReg))
      Opc = Mips::MFHI64, SrcReg = 0;
    else if (Mips::LO64RegClass.contains(SrcReg))
      Opc = Mips::MFLO64, SrcReg = 0;
    else if (Mips::FGR64RegClass.contains(SrcReg))
      Opc = Mips::DMFC1;
  }
  else if (Mips::GPR64RegClass.contains(SrcReg)) { // Copy from CPU64 Reg.
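    // A 64-bit GPR source can be copied into HI64/LO64 or into a 64-bit FPR
    // (via dmtc1). MTHI64/MTLO64 take no explicit destination operand, so
    // DestReg is cleared and only the source register is added below.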
    if (Mips::HI64RegClass.contains(DestReg))
      Opc = Mips::MTHI64, DestReg = 0;
    else if (Mips::LO64RegClass.contains(DestReg))
      Opc = Mips::MTLO64, DestReg = 0;
    else if (Mips::FGR64RegClass.contains(DestReg))
      Opc = Mips::DMTC1;
  }
  else if (Mips::MSA128BRegClass.contains(DestReg)) { // Copy to MSA reg
    if (Mips::MSA128BRegClass.contains(SrcReg))
      Opc = Mips::MOVE_V;
  }

  assert(Opc && "Cannot copy registers");

  MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc));

  if (DestReg)
    MIB.addReg(DestReg, RegState::Define);

  if (SrcReg)
    MIB.addReg(SrcReg, getKillRegState(KillSrc));

  if (ZeroReg)
    MIB.addReg(ZeroReg);
}

void MipsSEInstrInfo::
storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                unsigned SrcReg, bool isKill, int FI,
                const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
                int64_t Offset) const {
  DebugLoc DL;
  MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore);

  unsigned Opc = 0;

  if (Mips::GPR32RegClass.hasSubClassEq(RC))
    Opc = Mips::SW;
  else if (Mips::GPR64RegClass.hasSubClassEq(RC))
    Opc = Mips::SD;
  else if (Mips::ACC64RegClass.hasSubClassEq(RC))
    Opc = Mips::STORE_ACC64;
  else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
    Opc = Mips::STORE_ACC64DSP;
  else if (Mips::ACC128RegClass.hasSubClassEq(RC))
    Opc = Mips::STORE_ACC128;
  else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
    Opc = Mips::STORE_CCOND_DSP;
  else if (Mips::FGR32RegClass.hasSubClassEq(RC))
    Opc = Mips::SWC1;
  else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
    Opc = Mips::SDC1;
  else if (Mips::FGR64RegClass.hasSubClassEq(RC))
    Opc = Mips::SDC164;
  else if (RC->hasType(MVT::v16i8))
    Opc = Mips::ST_B;
  else if (RC->hasType(MVT::v8i16) || RC->hasType(MVT::v8f16))
    Opc = Mips::ST_H;
  else if (RC->hasType(MVT::v4i32) || RC->hasType(MVT::v4f32))
    Opc = Mips::ST_W;
  else if (RC->hasType(MVT::v2i64) || RC->hasType(MVT::v2f64))
    Opc = Mips::ST_D;

  assert(Opc && "Register class not handled!");
  BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
    .addFrameIndex(FI).addImm(Offset).addMemOperand(MMO);
}

void MipsSEInstrInfo::
loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                 unsigned DestReg, int FI, const TargetRegisterClass *RC,
                 const TargetRegisterInfo *TRI, int64_t Offset) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
  unsigned Opc = 0;

  if (Mips::GPR32RegClass.hasSubClassEq(RC))
    Opc = Mips::LW;
  else if (Mips::GPR64RegClass.hasSubClassEq(RC))
    Opc = Mips::LD;
  else if (Mips::ACC64RegClass.hasSubClassEq(RC))
    Opc = Mips::LOAD_ACC64;
  else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
    Opc = Mips::LOAD_ACC64DSP;
  else if (Mips::ACC128RegClass.hasSubClassEq(RC))
    Opc = Mips::LOAD_ACC128;
  else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
    Opc = Mips::LOAD_CCOND_DSP;
  else if (Mips::FGR32RegClass.hasSubClassEq(RC))
    Opc = Mips::LWC1;
  else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
    Opc = Mips::LDC1;
  else if (Mips::FGR64RegClass.hasSubClassEq(RC))
    Opc = Mips::LDC164;
  else if (RC->hasType(MVT::v16i8))
    Opc = Mips::LD_B;
  else if (RC->hasType(MVT::v8i16) || RC->hasType(MVT::v8f16))
    Opc = Mips::LD_H;
  else if (RC->hasType(MVT::v4i32) || RC->hasType(MVT::v4f32))
    Opc = Mips::LD_W;
  else if (RC->hasType(MVT::v2i64) || RC->hasType(MVT::v2f64))
    Opc = Mips::LD_D;

  assert(Opc && "Register class not handled!");
  BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(Offset)
    .addMemOperand(MMO);
}

bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();
  bool isMicroMips = Subtarget.inMicroMipsMode();
  unsigned Opc;

  switch(MI->getDesc().getOpcode()) {
  default:
    return false;
  case Mips::RetRA:
    expandRetRA(MBB, MI);
    break;
  case Mips::PseudoMFHI:
    Opc = isMicroMips ? Mips::MFHI16_MM : Mips::MFHI;
    expandPseudoMFHiLo(MBB, MI, Opc);
    break;
  case Mips::PseudoMFLO:
    Opc = isMicroMips ? Mips::MFLO16_MM : Mips::MFLO;
    expandPseudoMFHiLo(MBB, MI, Opc);
    break;
  case Mips::PseudoMFHI64:
    expandPseudoMFHiLo(MBB, MI, Mips::MFHI64);
    break;
  case Mips::PseudoMFLO64:
    expandPseudoMFHiLo(MBB, MI, Mips::MFLO64);
    break;
  case Mips::PseudoMTLOHI:
    expandPseudoMTLoHi(MBB, MI, Mips::MTLO, Mips::MTHI, false);
    break;
  case Mips::PseudoMTLOHI64:
    expandPseudoMTLoHi(MBB, MI, Mips::MTLO64, Mips::MTHI64, false);
    break;
  case Mips::PseudoMTLOHI_DSP:
    expandPseudoMTLoHi(MBB, MI, Mips::MTLO_DSP, Mips::MTHI_DSP, true);
    break;
  case Mips::PseudoCVT_S_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_S_W, Mips::MTC1, false);
    break;
  case Mips::PseudoCVT_D32_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_D32_W, Mips::MTC1, false);
    break;
  case Mips::PseudoCVT_S_L:
    expandCvtFPInt(MBB, MI, Mips::CVT_S_L, Mips::DMTC1, true);
    break;
  case Mips::PseudoCVT_D64_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_D64_W, Mips::MTC1, true);
    break;
  case Mips::PseudoCVT_D64_L:
    expandCvtFPInt(MBB, MI, Mips::CVT_D64_L, Mips::DMTC1, true);
    break;
  case Mips::BuildPairF64:
    expandBuildPairF64(MBB, MI, false);
    break;
  case Mips::BuildPairF64_64:
    expandBuildPairF64(MBB, MI, true);
    break;
  case Mips::ExtractElementF64:
    expandExtractElementF64(MBB, MI, false);
    break;
  case Mips::ExtractElementF64_64:
    expandExtractElementF64(MBB, MI, true);
    break;
  case Mips::MIPSeh_return32:
  case Mips::MIPSeh_return64:
    expandEhReturn(MBB, MI);
    break;
  }

  MBB.erase(MI);
  return true;
}

/// getOppositeBranchOpc - Return the inverse of the specified
/// opcode, e.g. turning BEQ to BNE.
unsigned MipsSEInstrInfo::getOppositeBranchOpc(unsigned Opc) const {
  switch (Opc) {
  default:             llvm_unreachable("Illegal opcode!");
  case Mips::BEQ:      return Mips::BNE;
  case Mips::BNE:      return Mips::BEQ;
  case Mips::BGTZ:     return Mips::BLEZ;
  case Mips::BGEZ:     return Mips::BLTZ;
  case Mips::BLTZ:     return Mips::BGEZ;
  case Mips::BLEZ:     return Mips::BGTZ;
  case Mips::BEQ64:    return Mips::BNE64;
  case Mips::BNE64:    return Mips::BEQ64;
  case Mips::BGTZ64:   return Mips::BLEZ64;
  case Mips::BGEZ64:   return Mips::BLTZ64;
  case Mips::BLTZ64:   return Mips::BGEZ64;
  case Mips::BLEZ64:   return Mips::BGTZ64;
  case Mips::BC1T:     return Mips::BC1F;
  case Mips::BC1F:     return Mips::BC1T;
  case Mips::BEQZC_MM: return Mips::BNEZC_MM;
  case Mips::BNEZC_MM: return Mips::BEQZC_MM;
  }
}

/// Adjust SP by Amount bytes.
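/// Amounts that fit in a signed 16-bit immediate are added with a single
/// ADDiu; larger amounts are first materialized into a scratch register with
/// loadImmediate and then added to SP with ADDu.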
void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const {
  MipsABIInfo ABI = Subtarget.getABI();
  DebugLoc DL;
  unsigned ADDu = ABI.GetPtrAdduOp();
  unsigned ADDiu = ABI.GetPtrAddiuOp();

  if (Amount == 0)
    return;

  if (isInt<16>(Amount)) // addi sp, sp, amount
    BuildMI(MBB, I, DL, get(ADDiu), SP).addReg(SP).addImm(Amount);
  else { // Expand immediate that doesn't fit in 16-bit.
    unsigned Reg = loadImmediate(Amount, MBB, I, DL, nullptr);
    BuildMI(MBB, I, DL, get(ADDu), SP).addReg(SP).addReg(Reg, RegState::Kill);
  }
}

/// This function generates the sequence of instructions needed to materialize
/// the immediate Imm into a newly created virtual register and returns that
/// register. If NewImm is non-null, the final ADDiu of the sequence is not
/// emitted; instead its immediate operand is returned through NewImm so the
/// caller can fold it into a following instruction.
unsigned
MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator II, DebugLoc DL,
                               unsigned *NewImm) const {
  MipsAnalyzeImmediate AnalyzeImm;
  const MipsSubtarget &STI = Subtarget;
  MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
  unsigned Size = STI.isABI_N64() ? 64 : 32;
  unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi;
  unsigned ZEROReg = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
  const TargetRegisterClass *RC = STI.isABI_N64() ?
    &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  bool LastInstrIsADDiu = NewImm;

  const MipsAnalyzeImmediate::InstSeq &Seq =
    AnalyzeImm.Analyze(Imm, Size, LastInstrIsADDiu);
  MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();

  assert(Seq.size() && (!LastInstrIsADDiu || (Seq.size() > 1)));

  // The first instruction can be a LUi, which is different from other
  // instructions (ADDiu, ORI and SLL) in that it does not have a register
  // operand.
  unsigned Reg = RegInfo.createVirtualRegister(RC);

  if (Inst->Opc == LUi)
    BuildMI(MBB, II, DL, get(LUi), Reg).addImm(SignExtend64<16>(Inst->ImmOpnd));
  else
    BuildMI(MBB, II, DL, get(Inst->Opc), Reg).addReg(ZEROReg)
      .addImm(SignExtend64<16>(Inst->ImmOpnd));

  // Build the remaining instructions in Seq.
  for (++Inst; Inst != Seq.end() - LastInstrIsADDiu; ++Inst)
    BuildMI(MBB, II, DL, get(Inst->Opc), Reg).addReg(Reg, RegState::Kill)
      .addImm(SignExtend64<16>(Inst->ImmOpnd));

  if (LastInstrIsADDiu)
    *NewImm = Inst->ImmOpnd;

  return Reg;
}

unsigned MipsSEInstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
  return (Opc == Mips::BEQ    || Opc == Mips::BNE    || Opc == Mips::BGTZ   ||
          Opc == Mips::BGEZ   || Opc == Mips::BLTZ   || Opc == Mips::BLEZ   ||
          Opc == Mips::BEQ64  || Opc == Mips::BNE64  || Opc == Mips::BGTZ64 ||
          Opc == Mips::BGEZ64 || Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 ||
          Opc == Mips::BC1T   || Opc == Mips::BC1F   || Opc == Mips::B      ||
          Opc == Mips::J || Opc == Mips::BEQZC_MM || Opc == Mips::BNEZC_MM) ?
         Opc : 0;
}

void MipsSEInstrInfo::expandRetRA(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I) const {
  if (Subtarget.isGP64bit())
    BuildMI(MBB, I, I->getDebugLoc(), get(Mips::PseudoReturn64))
        .addReg(Mips::RA_64);
  else
    BuildMI(MBB, I, I->getDebugLoc(), get(Mips::PseudoReturn)).addReg(Mips::RA);
}

std::pair<bool, bool>
MipsSEInstrInfo::compareOpndSize(unsigned Opc,
                                 const MachineFunction &MF) const {
  const MCInstrDesc &Desc = get(Opc);
  assert(Desc.NumOperands == 2 && "Unary instruction expected.");
  const MipsRegisterInfo *RI = &getRegisterInfo();
  unsigned DstRegSize = getRegClass(Desc, 0, RI, MF)->getSize();
  unsigned SrcRegSize = getRegClass(Desc, 1, RI, MF)->getSize();

  return std::make_pair(DstRegSize > SrcRegSize, DstRegSize < SrcRegSize);
}

void MipsSEInstrInfo::expandPseudoMFHiLo(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned NewOpc) const {
  BuildMI(MBB, I, I->getDebugLoc(), get(NewOpc), I->getOperand(0).getReg());
}

void MipsSEInstrInfo::expandPseudoMTLoHi(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned LoOpc,
                                         unsigned HiOpc,
                                         bool HasExplicitDef) const {
  // Expand
  //  lo_hi pseudomtlohi $gpr0, $gpr1
  // to these two instructions:
  //  mtlo $gpr0
  //  mthi $gpr1

  DebugLoc DL = I->getDebugLoc();
  const MachineOperand &SrcLo = I->getOperand(1), &SrcHi = I->getOperand(2);
  MachineInstrBuilder LoInst = BuildMI(MBB, I, DL, get(LoOpc));
  MachineInstrBuilder HiInst = BuildMI(MBB, I, DL, get(HiOpc));
  LoInst.addReg(SrcLo.getReg(), getKillRegState(SrcLo.isKill()));
  HiInst.addReg(SrcHi.getReg(), getKillRegState(SrcHi.isKill()));

  // Add lo/hi registers if the mtlo/hi instructions created have explicit
  // def registers.
  if (HasExplicitDef) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned DstLo = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
    unsigned DstHi = getRegisterInfo().getSubReg(DstReg, Mips::sub_hi);
    LoInst.addReg(DstLo, RegState::Define);
    HiInst.addReg(DstHi, RegState::Define);
  }
}

void MipsSEInstrInfo::expandCvtFPInt(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     unsigned CvtOpc, unsigned MovOpc,
                                     bool IsI64) const {
  const MCInstrDesc &CvtDesc = get(CvtOpc), &MovDesc = get(MovOpc);
  const MachineOperand &Dst = I->getOperand(0), &Src = I->getOperand(1);
  unsigned DstReg = Dst.getReg(), SrcReg = Src.getReg(), TmpReg = DstReg;
  unsigned KillSrc = getKillRegState(Src.isKill());
  DebugLoc DL = I->getDebugLoc();
  bool DstIsLarger, SrcIsLarger;

  std::tie(DstIsLarger, SrcIsLarger) =
    compareOpndSize(CvtOpc, *MBB.getParent());

  if (DstIsLarger)
    TmpReg = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);

  if (SrcIsLarger)
    DstReg = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);

  BuildMI(MBB, I, DL, MovDesc, TmpReg).addReg(SrcReg, KillSrc);
  BuildMI(MBB, I, DL, CvtDesc, DstReg).addReg(TmpReg, RegState::Kill);
}

void MipsSEInstrInfo::expandExtractElementF64(MachineBasicBlock &MBB,
                                              MachineBasicBlock::iterator I,
                                              bool FP64) const {
  unsigned DstReg = I->getOperand(0).getReg();
  unsigned SrcReg = I->getOperand(1).getReg();
  unsigned N = I->getOperand(2).getImm();
  DebugLoc dl = I->getDebugLoc();

  assert(N < 2 && "Invalid immediate");
  unsigned SubIdx = N ? Mips::sub_hi : Mips::sub_lo;
  unsigned SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx);

  // FPXX on MIPS-II or MIPS32r1 should have been handled with a spill/reload
  // in MipsSEFrameLowering.cpp.
  assert(!(Subtarget.isABI_FPXX() && !Subtarget.hasMips32r2()));

  // FP64A (FP64 with nooddspreg) should have been handled with a spill/reload
  // in MipsSEFrameLowering.cpp.
  assert(!(Subtarget.isFP64bit() && !Subtarget.useOddSPReg()));

  if (SubIdx == Mips::sub_hi && Subtarget.hasMTHC1()) {
    // FIXME: Strictly speaking MFHC1 only reads the top 32-bits however, we
    //        claim to read the whole 64-bits as part of a white lie used to
    //        temporarily work around a widespread bug in the -mfp64 support.
    //        The problem is that none of the 32-bit fpu ops mention the fact
    //        that they clobber the upper 32-bits of the 64-bit FPR. Fixing that
    //        requires a major overhaul of the FPU implementation which can't
    //        be done right now due to time constraints.
    //        MFHC1 is one of two instructions that are affected since they are
    //        the only instructions that don't read the lower 32-bits.
    //        We therefore pretend that it reads the bottom 32-bits to
    //        artificially create a dependency and prevent the scheduler
    //        changing the behaviour of the code.
    BuildMI(MBB, I, dl, get(FP64 ? Mips::MFHC1_D64 : Mips::MFHC1_D32), DstReg)
        .addReg(SrcReg);
  } else
    BuildMI(MBB, I, dl, get(Mips::MFC1), DstReg).addReg(SubReg);
}

void MipsSEInstrInfo::expandBuildPairF64(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         bool FP64) const {
  unsigned DstReg = I->getOperand(0).getReg();
  unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg();
  const MCInstrDesc& Mtc1Tdd = get(Mips::MTC1);
  DebugLoc dl = I->getDebugLoc();
  const TargetRegisterInfo &TRI = getRegisterInfo();

  // When mthc1 is available, use:
  //   mtc1 Lo, $fp
  //   mthc1 Hi, $fp
  //
  // Otherwise, for O32 FPXX ABI:
  //   spill + reload via ldc1
  // This case is handled by the frame lowering code.
  //
  // Otherwise, for FP32:
  //   mtc1 Lo, $fp
  //   mtc1 Hi, $fp + 1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.

  // FPXX on MIPS-II or MIPS32r1 should have been handled with a spill/reload
  // in MipsSEFrameLowering.cpp.
  assert(!(Subtarget.isABI_FPXX() && !Subtarget.hasMips32r2()));

  // FP64A (FP64 with nooddspreg) should have been handled with a spill/reload
  // in MipsSEFrameLowering.cpp.
  assert(!(Subtarget.isFP64bit() && !Subtarget.useOddSPReg()));

  BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_lo))
    .addReg(LoReg);

  if (Subtarget.hasMTHC1()) {
    // FIXME: The .addReg(DstReg) is a white lie used to temporarily work
    //        around a widespread bug in the -mfp64 support.
    //        The problem is that none of the 32-bit fpu ops mention the fact
    //        that they clobber the upper 32-bits of the 64-bit FPR. Fixing that
    //        requires a major overhaul of the FPU implementation which can't
    //        be done right now due to time constraints.
    //        MTHC1 is one of two instructions that are affected since they are
    //        the only instructions that don't read the lower 32-bits.
    //        We therefore pretend that it reads the bottom 32-bits to
    //        artificially create a dependency and prevent the scheduler
    //        changing the behaviour of the code.
    BuildMI(MBB, I, dl, get(FP64 ? Mips::MTHC1_D64 : Mips::MTHC1_D32), DstReg)
        .addReg(DstReg)
        .addReg(HiReg);
  } else if (Subtarget.isABI_FPXX())
    llvm_unreachable("BuildPairF64 not expanded in frame lowering code!");
  else
    BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_hi))
      .addReg(HiReg);
}

void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const {
  // This pseudo instruction is generated as part of the lowering of
  // ISD::EH_RETURN. We convert it to a stack increment by OffsetReg, and
  // indirect jump to TargetReg
  MipsABIInfo ABI = Subtarget.getABI();
  unsigned ADDU = ABI.GetPtrAdduOp();
  unsigned SP = Subtarget.isGP64bit() ? Mips::SP_64 : Mips::SP;
  unsigned RA = Subtarget.isGP64bit() ? Mips::RA_64 : Mips::RA;
  unsigned T9 = Subtarget.isGP64bit() ? Mips::T9_64 : Mips::T9;
  unsigned ZERO = Subtarget.isGP64bit() ? Mips::ZERO_64 : Mips::ZERO;
  unsigned OffsetReg = I->getOperand(0).getReg();
  unsigned TargetReg = I->getOperand(1).getReg();

  // addu $ra, $v0, $zero
  // addu $sp, $sp, $v1
  // jr   $ra (via RetRA)
  const TargetMachine &TM = MBB.getParent()->getTarget();
  if (TM.getRelocationModel() == Reloc::PIC_)
    BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), T9)
        .addReg(TargetReg)
        .addReg(ZERO);
  BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), RA)
      .addReg(TargetReg)
      .addReg(ZERO);
  BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), SP).addReg(SP).addReg(OffsetReg);
  expandRetRA(MBB, I);
}

const MipsInstrInfo *llvm::createMipsSEInstrInfo(const MipsSubtarget &STI) {
  return new MipsSEInstrInfo(STI);
}