//===- MipsSEFrameLowering.cpp - Mips32/64 Frame Information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of the TargetFrameLowering
// class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsABIInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

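// Map an accumulator register class to the (MFHI, MFLO) opcode pair used to
// read its two halves, or (0, 0) if Src is not an accumulator register.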
static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

namespace {

/// Helper class to expand pseudos.
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  using Iter = MachineBasicBlock::iterator;

  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};

} // end anonymous namespace

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (auto &MBB : MF) {
    for (Iter I = MBB.begin(), End = MBB.end(); I != End;)
      Expanded |= expandInstr(MBB, I++);
  }

  return Expanded;
}

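// Expand the pseudo at I if it needs frame-lowering-time expansion. A true
// return value means this function rewrote and erased I itself. The
// BuildPairF64 and ExtractElementF64 cases erase I on their own and return
// false; their expansions work on physical registers and a dedicated spill
// slot, so they do not need the emergency scavenging slot that a true result
// makes determineCalleeSaves() create.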
bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch (I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  unsigned Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

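// Each half of the accumulator is moved through a GPR-sized temporary, so the
// temporary register class is chosen from half the accumulator's size in
// bytes (size in bits / 16).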
bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  const TargetRegisterClass *DstRC = RegInfo.getMinimalPhysRegClass(Dst);
  unsigned VRegSize = RegInfo.getRegSizeInBits(*DstRC) / 16;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when the ABI is fpxx and mthc1 is not
/// available, and for the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For fpxx and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether they are in an odd or even
  // register.
  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned LoReg = I->getOperand(1).getReg();
    unsigned HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
    if (!Subtarget.isLittle())
      std::swap(LoReg, HiReg);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
                        &RegInfo, 4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
    return true;
  }

  return false;
}

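// For reference, on a little-endian target the expansion above produces
// roughly the following sequence (register names illustrative; FI is the
// re-used MoveF64ViaSpillFI slot, which becomes an $sp/$fp offset once frame
// indexes are eliminated):
//   sw   $lo, FI       # lower 32 bits of the pair
//   sw   $hi, FI + 4   # upper 32 bits of the pair
//   ldc1 $fd, FI       # reload the pair as one 64-bit FPR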
/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the case when the ABI is fpxx and mfhc1
/// is not available, and for the case where the ABI is FP64A. It is
/// implemented here because frame indexes are eliminated before
/// MipsSEInstrInfo::expandExtractElementF64 is called.
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  const MachineOperand &Op1 = I->getOperand(1);
  const MachineOperand &Op2 = I->getOperand(2);

  if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) {
    unsigned DstReg = I->getOperand(0).getReg();
    BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg);
    return true;
  }

  // For fpxx and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether they are in an odd or even
  // register.

  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned SrcReg = Op1.getReg();
    unsigned N = Op2.getImm();
    int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
    TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.getStackAlignment()) {}

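// A typical prologue produced below looks roughly like this (illustrative;
// the callee-saved stores themselves are inserted by
// spillCalleeSavedRegisters and only annotated with CFI here):
//   addiu $sp, $sp, -StackSize
//   .cfi_def_cfa_offset StackSize
//   sw    $ra, ...($sp)
//   .cfi_offset 31, ...
//   move  $fp, $sp                 # only when hasFP(MF)
//   .cfi_def_cfa_register $fp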
void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl;
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();
  unsigned ADDiu = ABI.GetPtrAddiuOp();
  unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;

  const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
      &Mips::GPR64RegClass : &Mips::GPR32RegClass;

  // First, compute final stack size.
  uint64_t StackSize = MFI.getStackSize();

  // No need to allocate space on the stack.
  if (StackSize == 0 && !MFI.adjustsStack()) return;

  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Adjust stack.
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex = MF.addFrameInst(
      MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  if (MF.getFunction()->hasFnAttribute("interrupt"))
    emitInterruptPrologueStub(MF, MBB);

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  if (!CSI.empty()) {
    // Find the instruction past the last instruction that saves a callee-saved
    // register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over the list of callee-saved registers and emit .cfi_offset
    // directives.
    for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
           E = CSI.end(); I != E; ++I) {
      int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
      unsigned Reg = I->getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
        MBB.addLiveIn(ABI.GetEhDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo);
    }

    // Emit .cfi_offset directives for eh data registers.
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI.getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true);
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If the frame pointer is enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
    BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    if (RegInfo.needsStackRealignment(MF)) {
      // addiu $Reg, $zero, -MaxAlignment
      // andi $sp, $sp, $Reg
      unsigned VR = MF.getRegInfo().createVirtualRegister(RC);
      assert(isInt<16>(MFI.getMaxAlignment()) &&
             "Function's alignment size requirement is not supported.");
      int MaxAlign = -(int)MFI.getMaxAlignment();

      BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO).addImm(MaxAlign);
      BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);

      if (hasBP(MF)) {
        // move $s7, $sp
        unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
        BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP)
            .addReg(SP)
            .addReg(ZERO);
      }
    }
  }
}

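// The ISR prologue stub below reads Cause (EIC mode only), EPC and Status
// into the kernel scratch registers, spills EPC and Status to dedicated frame
// slots, masks the interrupt-priority bits and the KSU/ERL/EXL field (and CU1
// when hard-float), and finally writes the new Status back with mtc0.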
void MipsSEFrameLowering::emitInterruptPrologueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Report an error if the target doesn't support Mips32r2 or later.
  // The epilogue relies on the use of the "ehb" to clear execution
  // hazards. Pre-R2 MIPS relies on an implementation defined number
  // of "ssnop"s to clear the execution hazard. Support for ssnop hazard
  // clearing is not provided, so reject that configuration.
  if (!STI.hasMips32r2())
    report_fatal_error(
        "\"interrupt\" attribute is not supported on pre-MIPS32R2 or "
        "MIPS16 targets.");

  // The GP register contains the "user" value, so we cannot perform
  // any gp relative loads until we restore the "kernel" or "system" gp
  // value. Until support is written we shall only accept the static
  // relocation model.
  if ((STI.getRelocationModel() != Reloc::Static))
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "static relocation model on MIPS at the present time.");

  if (!STI.isABI_O32() || STI.hasMips64())
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "O32 ABI on MIPS32R2+ at the present time.");

  // Perform ISR handling like GCC.
  StringRef IntKind =
      MF.getFunction()->getFnAttribute("interrupt").getValueAsString();
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // EIC interrupt handling needs to read the Cause register to disable
  // interrupts.
  if (IntKind == "eic") {
    // Coprocessor registers are always live per se.
    MBB.addLiveIn(Mips::COP013);
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K0)
        .addReg(Mips::COP013)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);

    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EXT), Mips::K0)
        .addReg(Mips::K0)
        .addImm(10)
        .addImm(6)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Fetch and spill EPC.
  MBB.addLiveIn(Mips::COP014);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP014)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(0), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Fetch and spill Status.
  MBB.addLiveIn(Mips::COP012);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP012)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(1), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Build the configuration for disabling lower priority interrupts. Non-EIC
  // interrupts need to be masked off with zero, EIC from the Cause register.
  unsigned InsPosition = 8;
  unsigned InsSize = 0;
  unsigned SrcReg = Mips::ZERO;

  // If the interrupt we're tied to is the EIC, switch the source for the
  // masking off interrupts to the cause register.
  if (IntKind == "eic") {
    SrcReg = Mips::K0;
    InsPosition = 10;
    InsSize = 6;
  } else
    InsSize = StringSwitch<unsigned>(IntKind)
                  .Case("sw0", 1)
                  .Case("sw1", 2)
                  .Case("hw0", 3)
                  .Case("hw1", 4)
                  .Case("hw2", 5)
                  .Case("hw3", 6)
                  .Case("hw4", 7)
                  .Case("hw5", 8)
                  .Default(0);
  assert(InsSize != 0 && "Unknown interrupt type!");

  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(SrcReg)
      .addImm(InsPosition)
      .addImm(InsSize)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Mask off KSU, ERL, EXL.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(Mips::ZERO)
      .addImm(1)
      .addImm(4)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Disable the FPU as we are not spilling those register sets.
  if (!STI.useSoftFloat())
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
        .addReg(Mips::ZERO)
        .addImm(29)
        .addImm(1)
        .addReg(Mips::K1)
        .setMIFlag(MachineInstr::FrameSetup);

  // Set the new status.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);
}

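// A typical epilogue produced below looks roughly like this (illustrative;
// the callee-saved reloads themselves are inserted by the generic
// restore code):
//   move  $sp, $fp               # only when hasFP(MF)
//   lw    $ra, ...($sp)
//   addiu $sp, $sp, StackSize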
void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  DebugLoc DL = MBBI->getDebugLoc();
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();

  // If the frame pointer is enabled, restore the stack pointer from it.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, DL, TII.get(MOVE), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC =
        ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
                               MipsFI->getEhDataRegFI(J), RC, &RegInfo);
    }
  }

  if (MF.getFunction()->hasFnAttribute("interrupt"))
    emitInterruptEpilogueStub(MF, MBB);

  // Get the number of bytes from FrameInfo.
  uint64_t StackSize = MFI.getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

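// The ISR epilogue stub below disables interrupts (di followed by ehb to
// clear the execution hazard) and then restores EPC and Status from the
// frame slots filled by the prologue stub.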
void MipsSEFrameLowering::emitInterruptEpilogueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Perform ISR handling like GCC.
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // Disable Interrupts.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::DI), Mips::ZERO);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));

  // Restore EPC.
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(0), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
      .addReg(Mips::K1)
      .addImm(0);

  // Restore Status.
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(1), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0);
}

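// Frame objects are addressed relative to the incoming stack pointer value:
// the returned offset is the object's offset plus the frame size, minus the
// local area offset, plus any offset adjustment. Fixed objects (such as
// incoming argument slots) are addressed through $fp when a frame pointer
// exists; other objects use the base pointer when one is reserved, otherwise
// $sp.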
int MipsSEFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                int FI,
                                                unsigned &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsABIInfo ABI = STI.getABI();

  if (MFI.isFixedObjectIndex(FI))
    FrameReg = hasFP(MF) ? ABI.GetFramePtr() : ABI.GetStackPtr();
  else
    FrameReg = hasBP(MF) ? ABI.GetBasePtr() : ABI.GetStackPtr();

  return MFI.getObjectOffset(FI) + MFI.getStackSize() -
         getOffsetOfLocalArea() + MFI.getOffsetAdjustment();
}

bool MipsSEFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *EntryBlock = &MF->front();
  const TargetInstrInfo &TII = *STI.getInstrInfo();

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    // Add the callee-saved register as live-in. Do not add it if the register
    // is RA and the return address is taken, because it has already been added
    // in MipsTargetLowering::lowerRETURNADDR.
    // It's killed at the spill, unless the register is RA and the return
    // address is taken.
    unsigned Reg = CSI[i].getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo().isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      EntryBlock->addLiveIn(Reg);

    // ISRs require HI/LO to be copied into kernel registers before being
    // spilled to the stack frame.
    bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
                   Reg == Mips::HI0 || Reg == Mips::HI0_64);
    const Function *Func = MBB.getParent()->getFunction();
    if (IsLOHI && Func->hasFnAttribute("interrupt")) {
      DebugLoc DL = MI->getDebugLoc();

      unsigned Op = 0;
      if (!STI.getABI().ArePtrs64bit()) {
        Op = (Reg == Mips::HI0) ? Mips::MFHI : Mips::MFLO;
        Reg = Mips::K0;
      } else {
        Op = (Reg == Mips::HI0) ? Mips::MFHI64 : Mips::MFLO64;
        Reg = Mips::K0_64;
      }
      BuildMI(MBB, MI, DL, TII.get(Op), Mips::K0)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(*EntryBlock, MI, Reg, IsKill,
                            CSI[i].getFrameIdx(), RC, TRI);
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // Reserve the call frame if the size of the maximum call frame fits into the
  // 16-bit immediate field and there are no variable sized objects on the
  // stack.
  // Make sure the second register scavenger spill slot can be accessed with
  // one instruction.
  return isInt<16>(MFI.getMaxCallFrameSize() + getStackAlignment()) &&
         !MFI.hasVarSizedObjects();
}

/// Mark \p Reg and all registers aliasing it in the bitset.
static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
                         unsigned Reg) {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
    SavedRegs.set(*AI);
}

void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MipsABIInfo ABI = STI.getABI();
  unsigned FP = ABI.GetFramePtr();
  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;

  // Mark $fp as used if the function has a dedicated frame pointer.
  if (hasFP(MF))
    setAliasRegs(MF, SavedRegs, FP);
  // Mark $s7 as used if the function has a dedicated base pointer.
  if (hasBP(MF))
    setAliasRegs(MF, SavedRegs, BP);

  // Create spill slots for eh data registers if the function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI();

  // Create spill slots for Coprocessor 0 registers if the function is an ISR.
  if (MipsFI->isISR())
    MipsFI->createISRRegFI();

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If the target
    // is mips64, it should be 64-bit, otherwise it should be 32-bit.
    const TargetRegisterClass &RC = STI.hasMips64() ?
        Mips::GPR64RegClass : Mips::GPR32RegClass;
    int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                                 TRI->getSpillAlignment(RC),
                                                 false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set scavenging frame index if necessary.
  uint64_t MaxSPOffset = estimateStackSize(MF);

  // MSA has a minimum offset of 10 bits signed. If there is a variable
  // sized object on the stack, the estimation cannot account for it.
  if (isIntN(STI.hasMSA() ? 10 : 16, MaxSPOffset) &&
      !MF.getFrameInfo().hasVarSizedObjects())
    return;

  const TargetRegisterClass &RC =
      ABI.ArePtrs64bit() ? Mips::GPR64RegClass : Mips::GPR32RegClass;
  int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                               TRI->getSpillAlignment(RC),
                                               false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}