//===-- MipsSEFrameLowering.cpp - Mips32/64 Frame Information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

namespace {
typedef MachineBasicBlock::iterator Iter;

static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

/// Helper class to expand pseudos.
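/// The expanded pseudos are the accumulator load/store/copy pseudos and, for
/// the FPXX and FP64A cases, BuildPairF64 and ExtractElementF64. Expansion
/// runs from processFunctionBeforeCalleeSavedScan, before frame indexes are
/// eliminated.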
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
};
}

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()) {}

bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (MachineFunction::iterator BB = MF.begin(), BBEnd = MF.end();
       BB != BBEnd; ++BB)
    for (Iter I = BB->begin(), End = BB->end(); I != End;)
      Expanded |= expandInstr(*BB, I++);

  return Expanded;
}

bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch(I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  unsigned Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when the ABI is fpxx and mthc1 is not
/// available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For fpxx and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether the register is odd or even.

  const TargetMachine &TM = MF.getTarget();
  const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    const MipsSEInstrInfo &TII = *static_cast<const MipsSEInstrInfo *>(
        TM.getSubtargetImpl()->getInstrInfo());
    const MipsRegisterInfo &TRI = *static_cast<const MipsRegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());

    unsigned DstReg = I->getOperand(0).getReg();
    unsigned LoReg = I->getOperand(1).getReg();
    unsigned HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC, &TRI,
                        0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC, &TRI,
                        4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &TRI, 0);
    return true;
  }

  return false;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the case when the ABI is fpxx and mfhc1
/// is not available and the case where the ABI is FP64A. It is implemented
/// here because frame indexes are eliminated before MipsSEInstrInfo::
/// expandExtractElementF64 is called.
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  // For fpxx and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether the register is odd or even.

  const TargetMachine &TM = MF.getTarget();
  const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    const MipsSEInstrInfo &TII = *static_cast<const MipsSEInstrInfo *>(
        TM.getSubtargetImpl()->getInstrInfo());
    const MipsRegisterInfo &TRI = *static_cast<const MipsRegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());

    unsigned DstReg = I->getOperand(0).getReg();
    unsigned SrcReg = I->getOperand(1).getReg();
    unsigned N = I->getOperand(2).getImm();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
    TII.storeRegToStack(MBB, I, SrcReg, I->getOperand(1).isKill(), FI, RC, &TRI,
                        0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &TRI, N * 4);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.stackAlignment()) {}

unsigned MipsSEFrameLowering::ehDataReg(unsigned I) const {
  static const unsigned EhDataReg[] = {
    Mips::A0, Mips::A1, Mips::A2, Mips::A3
  };
  static const unsigned EhDataReg64[] = {
    Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64
  };

  return STI.isABI_N64() ? EhDataReg64[I] : EhDataReg[I];
}

void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
  unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
  unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
  unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;

  // First, compute final stack size.
  uint64_t StackSize = MFI->getStackSize();

  // No need to allocate space on the stack.
  if (StackSize == 0 && !MFI->adjustsStack()) return;

  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  MachineLocation DstML, SrcML;

  // Adjust stack.
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex = MMI.addFrameInst(
      MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();

  if (CSI.size()) {
    // Find the instruction past the last instruction that saves a callee-saved
    // register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over list of callee-saved registers and emit .cfi_offset
    // directives.
    for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
           E = CSI.end(); I != E; ++I) {
      int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
      unsigned Reg = I->getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
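      // For AFGR64 the halves are the sub_lo/sub_hi subregisters; for FGR64
      // the second half uses the next DWARF register number. On big-endian
      // targets the two halves are swapped before the offsets are emitted.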
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC = STI.isABI_N64() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ehDataReg(I)))
        MBB.addLiveIn(ehDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ehDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo);
    }

    // Emit .cfi_offset directives for eh data registers.
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ehDataReg(I), true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If the frame pointer is enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
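    // (ADDu/DADDu with $zero as the second source operand is the canonical
    // register-to-register move on MIPS.)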
    BuildMI(MBB, MBBI, dl, TII.get(ADDu), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  }
}

void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const MipsRegisterInfo &RegInfo = *static_cast<const MipsRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());

  DebugLoc dl = MBBI->getDebugLoc();
  unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
  unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
  unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
  unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;

  // If the frame pointer is enabled, restore the stack pointer from it.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, dl, TII.get(ADDu), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC = STI.isABI_N64() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ehDataReg(J), MipsFI->getEhDataRegFI(J),
                               RC, &RegInfo);
    }
  }

  // Get the number of bytes from FrameInfo.
  uint64_t StackSize = MFI->getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

bool MipsSEFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *EntryBlock = MF->begin();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    // Add the callee-saved register as live-in. Do not add it if the register
    // is RA and the return address is taken, because it has already been added
    // in MipsTargetLowering::LowerRETURNADDR.
    // The register is killed at the spill, unless it is RA and the return
    // address is taken.
    unsigned Reg = CSI[i].getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo()->isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      EntryBlock->addLiveIn(Reg);

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(*EntryBlock, MI, Reg, IsKill,
                            CSI[i].getFrameIdx(), RC, TRI);
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Reserve the call frame if the size of the maximum call frame fits into the
  // 16-bit immediate field and there are no variable sized objects on the
  // stack. Make sure the second register scavenger spill slot can be accessed
  // with one instruction.
  return isInt<16>(MFI->getMaxCallFrameSize() + getStackAlignment()) &&
    !MFI->hasVarSizedObjects();
}

// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions.
void MipsSEFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(MF.getSubtarget().getInstrInfo());

  if (!hasReservedCallFrame(MF)) {
    int64_t Amount = I->getOperand(0).getImm();

    if (I->getOpcode() == Mips::ADJCALLSTACKDOWN)
      Amount = -Amount;

    unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
    TII.adjustStackPtr(SP, Amount, MBB, I);
  }

  MBB.erase(I);
}

void MipsSEFrameLowering::
processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                     RegScavenger *RS) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;

  // Mark $fp as used if the function has a dedicated frame pointer.
  if (hasFP(MF))
    MRI.setPhysRegUsed(FP);

  // Create spill slots for eh data registers if the function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI();

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If the target
    // is mips64, it should be 64-bit, otherwise it should be 32-bit.
    const TargetRegisterClass *RC = STI.hasMips64() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;
    int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                  RC->getAlignment(), false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set the scavenging frame index if necessary.
  uint64_t MaxSPOffset = MF.getInfo<MipsFunctionInfo>()->getIncomingArgSize() +
    estimateStackSize(MF);

  if (isInt<16>(MaxSPOffset))
    return;

  const TargetRegisterClass *RC = STI.isABI_N64() ?
      &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment(), false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}