//===-- MipsSEFrameLowering.cpp - Mips32/64 Frame Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsMachineFunction.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

namespace {
typedef MachineBasicBlock::iterator Iter;

static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

/// Helper class to expand pseudos.
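/// The pseudos handled here load, store or copy accumulator and DSP
/// condition-code registers, and expand BuildPairF64/ExtractElementF64 via a
/// stack slot when the ABI requires it (see expandInstr below).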
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};
}

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (MachineFunction::iterator BB = MF.begin(), BBEnd = MF.end();
       BB != BBEnd; ++BB)
    for (Iter I = BB->begin(), End = BB->end(); I != End;)
      Expanded |= expandInstr(*BB, I++);

  return Expanded;
}

bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch(I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

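// Like expandLoadCCond above, the value is routed through a GPR-class virtual
// register because the DSP condition-code register cannot be stored to the
// stack directly.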
void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  unsigned Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

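/// Expand a COPY whose source is an accumulator register. There is no direct
/// accumulator-to-GPR-pair copy, so the value is moved out with the mfhi/mflo
/// opcodes (or their pseudo/DSP variants) and then copied into the
/// destination's sub_lo and sub_hi sub-registers.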
bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when the ABI is fpxx and mthc1 is
/// not available, and for the case where the ABI is FP64A. It is implemented
/// here because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For fpxx and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether they end up in an odd or
  // even register.
  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned LoReg = I->getOperand(1).getReg();
    unsigned HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
    if (!Subtarget.isLittle())
      std::swap(LoReg, HiReg);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
                        &RegInfo, 4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
    return true;
  }

  return false;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the case when the ABI is fpxx and mfhc1
/// is not available, and for the case where the ABI is FP64A. It is
/// implemented here because frame indexes are eliminated before
/// MipsSEInstrInfo::expandExtractElementF64 is called.
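/// The expansion spills the 64-bit source to a stack slot and reloads the
/// requested 32-bit half from the appropriate offset within that slot.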
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  const MachineOperand &Op1 = I->getOperand(1);
  const MachineOperand &Op2 = I->getOperand(2);

  if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) {
    unsigned DstReg = I->getOperand(0).getReg();
    BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg);
    return true;
  }

  // For fpxx and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether they end up in an odd or
  // even register.

  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned SrcReg = Op1.getReg();
    unsigned N = Op2.getImm();
    int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
    TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.stackAlignment()) {}

void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl;
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();
  unsigned ADDiu = ABI.GetPtrAddiuOp();
  unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;

  const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;

  // First, compute final stack size.
  uint64_t StackSize = MFI->getStackSize();

  // No need to allocate space on the stack.
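  // Return early when there are no local stack objects and no call-frame
  // adjustment is required.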
  if (StackSize == 0 && !MFI->adjustsStack()) return;

  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  MachineLocation DstML, SrcML;

  // Adjust stack.
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex = MMI.addFrameInst(
      MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  if (MF.getFunction()->hasFnAttribute("interrupt"))
    emitInterruptPrologueStub(MF, MBB);

  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();

  if (CSI.size()) {
    // Find the instruction past the last instruction that saves a callee-saved
    // register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over list of callee-saved registers and emit .cfi_offset
    // directives.
    for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
           E = CSI.end(); I != E; ++I) {
      int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
      unsigned Reg = I->getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, 1), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
        MBB.addLiveIn(ABI.GetEhDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo);
    }

    // Emit .cfi_offset directives for eh data registers.
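    // The frame indices used here were created by determineCalleeSaves via
    // MipsFunctionInfo::createEhDataRegsFI.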
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If the frame pointer is enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
    BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    if (RegInfo.needsStackRealignment(MF)) {
      // addiu $Reg, $zero, -MaxAlignment
      // andi $sp, $sp, $Reg
      unsigned VR = MF.getRegInfo().createVirtualRegister(RC);
      assert(isInt<16>(MFI->getMaxAlignment()) &&
             "Function's alignment size requirement is not supported.");
      int MaxAlign = -(signed)MFI->getMaxAlignment();

      BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO).addImm(MaxAlign);
      BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);

      if (hasBP(MF)) {
        // move $s7, $sp
        unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
        BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP)
            .addReg(SP)
            .addReg(ZERO);
      }
    }
  }
}

void MipsSEFrameLowering::emitInterruptPrologueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {

  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Report an error if the target doesn't support MIPS32r2 or later.
  // The epilogue relies on the use of the "ehb" instruction to clear execution
  // hazards. Pre-R2 MIPS relies on an implementation-defined number of
  // "ssnop"s to clear the execution hazard. Support for ssnop hazard clearing
  // is not provided, so reject that configuration.
  if (!STI.hasMips32r2())
    report_fatal_error(
        "\"interrupt\" attribute is not supported on pre-MIPS32R2 or "
        "MIPS16 targets.");

  // The GP register contains the "user" value, so we cannot perform
  // any gp relative loads until we restore the "kernel" or "system" gp
  // value. Until support is written we shall only accept the static
  // relocation model.
  if ((STI.getRelocationModel() != Reloc::Static))
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "static relocation model on MIPS at the present time.");

  if (!STI.isABI_O32() || STI.hasMips64())
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "O32 ABI on MIPS32R2+ at the present time.");

  // Perform ISR handling like GCC
  StringRef IntKind =
      MF.getFunction()->getFnAttribute("interrupt").getValueAsString();
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // EIC interrupt handling needs to read the Cause register to disable
  // interrupts.
  if (IntKind == "eic") {
    // Coprocessor registers are always live per se.
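    // Mips::COP013 is coprocessor 0, register 13 (the Cause register); it is
    // read into $k0 with mfc0 below.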
    MBB.addLiveIn(Mips::COP013);
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K0)
        .addReg(Mips::COP013)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);

    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EXT), Mips::K0)
        .addReg(Mips::K0)
        .addImm(10)
        .addImm(6)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Fetch and spill EPC
  MBB.addLiveIn(Mips::COP014);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP014)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(0), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Fetch and spill Status
  MBB.addLiveIn(Mips::COP012);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP012)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(1), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Build the configuration for disabling lower priority interrupts. Non-EIC
  // interrupts need to be masked off with zero, EIC from the Cause register.
  unsigned InsPosition = 8;
  unsigned InsSize = 0;
  unsigned SrcReg = Mips::ZERO;

  // If the interrupt we're tied to is the EIC, switch the source for the
  // masking off interrupts to the Cause register.
  if (IntKind == "eic") {
    SrcReg = Mips::K0;
    InsPosition = 10;
    InsSize = 6;
  } else
    InsSize = StringSwitch<unsigned>(IntKind)
                  .Case("sw0", 1)
                  .Case("sw1", 2)
                  .Case("hw0", 3)
                  .Case("hw1", 4)
                  .Case("hw2", 5)
                  .Case("hw3", 6)
                  .Case("hw4", 7)
                  .Case("hw5", 8)
                  .Default(0);
  assert(InsSize != 0 && "Unknown interrupt type!");

  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(SrcReg)
      .addImm(InsPosition)
      .addImm(InsSize)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Mask off KSU, ERL, EXL
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(Mips::ZERO)
      .addImm(1)
      .addImm(4)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Disable the FPU as we are not spilling those register sets.
  if (!STI.useSoftFloat())
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
        .addReg(Mips::ZERO)
        .addImm(29)
        .addImm(1)
        .addReg(Mips::K1)
        .setMIFlag(MachineInstr::FrameSetup);

  // Set the new status
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);
}

void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  DebugLoc DL = MBBI->getDebugLoc();
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();

  // If the frame pointer was used, restore the stack pointer from it.
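  // The "move $sp, $fp" below is placed ahead of the callee-saved register
  // reloads so that they execute with the restored stack pointer.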
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, DL, TII.get(MOVE), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC =
        ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
                               MipsFI->getEhDataRegFI(J), RC, &RegInfo);
    }
  }

  if (MF.getFunction()->hasFnAttribute("interrupt"))
    emitInterruptEpilogueStub(MF, MBB);

  // Get the number of bytes from FrameInfo
  uint64_t StackSize = MFI->getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

void MipsSEFrameLowering::emitInterruptEpilogueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {

  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Perform ISR handling like GCC
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // Disable Interrupts.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::DI), Mips::ZERO);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));

  // Restore EPC
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(0), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
      .addReg(Mips::K1)
      .addImm(0);

  // Restore Status
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(1), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0);
}

int MipsSEFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                int FI,
                                                unsigned &FrameReg) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsABIInfo ABI = STI.getABI();

  if (MFI->isFixedObjectIndex(FI))
    FrameReg = hasFP(MF) ? ABI.GetFramePtr() : ABI.GetStackPtr();
  else
    FrameReg = hasBP(MF) ? ABI.GetBasePtr() : ABI.GetStackPtr();

  return MFI->getObjectOffset(FI) + MFI->getStackSize() -
         getOffsetOfLocalArea() + MFI->getOffsetAdjustment();
}

bool MipsSEFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *EntryBlock = &MF->front();
  const TargetInstrInfo &TII = *STI.getInstrInfo();

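  // Spill each callee-saved register in the entry block. For interrupt
  // handlers, HI/LO are first copied into a kernel register ($k0) and that
  // copy is spilled instead.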
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    // Add the callee-saved register as live-in. Do not add if the register is
    // RA and the return address is taken, because it has already been added in
    // the method MipsTargetLowering::LowerRETURNADDR.
    // It's killed at the spill, unless the register is RA and the return
    // address is taken.
    unsigned Reg = CSI[i].getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo()->isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      EntryBlock->addLiveIn(Reg);

    // ISRs require HI/LO to be copied into kernel registers before being
    // spilled to the stack frame.
    bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
                   Reg == Mips::HI0 || Reg == Mips::HI0_64);
    const Function *Func = MBB.getParent()->getFunction();
    if (IsLOHI && Func->hasFnAttribute("interrupt")) {
      DebugLoc DL = MI->getDebugLoc();

      unsigned Op = 0;
      if (!STI.getABI().ArePtrs64bit()) {
        Op = (Reg == Mips::HI0) ? Mips::MFHI : Mips::MFLO;
        Reg = Mips::K0;
      } else {
        Op = (Reg == Mips::HI0) ? Mips::MFHI64 : Mips::MFLO64;
        Reg = Mips::K0_64;
      }
      BuildMI(MBB, MI, DL, TII.get(Op), Mips::K0)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(*EntryBlock, MI, Reg, IsKill,
                            CSI[i].getFrameIdx(), RC, TRI);
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Reserve the call frame if the size of the maximum call frame fits into a
  // 16-bit immediate field and there are no variable-sized objects on the
  // stack. Make sure the second register scavenger spill slot can be accessed
  // with one instruction.
  return isInt<16>(MFI->getMaxCallFrameSize() + getStackAlignment()) &&
         !MFI->hasVarSizedObjects();
}

/// Mark \p Reg and all registers aliasing it in the bitset.
static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
                         unsigned Reg) {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
    SavedRegs.set(*AI);
}

void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MipsABIInfo ABI = STI.getABI();
  unsigned FP = ABI.GetFramePtr();
  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;

  // Mark $fp as used if the function has a dedicated frame pointer.
  if (hasFP(MF))
    setAliasRegs(MF, SavedRegs, FP);
  // Mark $s7 as used if the function has a dedicated base pointer.
  if (hasBP(MF))
    setAliasRegs(MF, SavedRegs, BP);

  // Create spill slots for eh data registers if the function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI();

  // Create spill slots for Coprocessor 0 registers if the function is an ISR.
  if (MipsFI->isISR())
    MipsFI->createISRRegFI();

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
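  // The expansion introduces virtual registers after register allocation, so
  // the register scavenger needs an emergency spill slot to materialize them.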
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If the target
    // is mips64, it should be 64-bit, otherwise it should be 32-bit.
    const TargetRegisterClass *RC = STI.hasMips64() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;
    int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                  RC->getAlignment(), false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set scavenging frame index if necessary.
  uint64_t MaxSPOffset = MF.getInfo<MipsFunctionInfo>()->getIncomingArgSize() +
      estimateStackSize(MF);

  if (isInt<16>(MaxSPOffset))
    return;

  const TargetRegisterClass *RC =
      ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment(), false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}