//===-- MipsSEFrameLowering.cpp - Mips32/64 Frame Information -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

namespace {
typedef MachineBasicBlock::iterator Iter;

/// Map an accumulator register (ACC64, ACC64DSP or ACC128) to the pair of
/// (MFHI, MFLO) opcodes used to read its high/low halves.  Returns (0, 0)
/// when \p Src is not an accumulator register, which callers use as the
/// "not an ACC copy" signal.
static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

/// Helper class to expand pseudos.
///
/// Expands accumulator/condition-code load, store and copy pseudos (and the
/// spill/reload forms of BuildPairF64/ExtractElementF64) into sequences of
/// real instructions plus stack traffic.  It is run from
/// MipsSEFrameLowering::determineCalleeSaves, i.e. before frame
/// finalization, so the expansions may create virtual registers and frame
/// indexes.
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);

  /// Walk every instruction in the function and expand the pseudos listed in
  /// expandInstr.  Returns true if at least one ACC/CCond expansion was
  /// performed (the caller uses this to add an emergency spill slot).
  bool expand();

private:
  /// Dispatch on the opcode of *I.  Returns true only for the expansions
  /// that require the emergency spill slot; the F64 build/extract cases
  /// erase the instruction themselves and return false because they spill
  /// through their own dedicated slot (getMoveF64ViaSpillFI).
  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};
}

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

bool ExpandPseudo::expand() {
  bool Expanded = false;

  // Note: I is advanced before expandInstr runs so that erasing *I inside
  // the expansion does not invalidate the loop iterator.
  for (MachineFunction::iterator BB = MF.begin(), BBEnd = MF.end();
       BB != BBEnd; ++BB)
    for (Iter I = BB->begin(), End = BB->end(); I != End;)
      Expanded |= expandInstr(*BB, I++);

  return Expanded;
}

bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch(I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  // The F64 cases erase the pseudo themselves (only when the expansion
  // happened) and deliberately return false: they do not need the emergency
  // spill slot that a `true` return would cause to be created.
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  // All `break` cases above expanded the pseudo; remove it and report that
  // an expansion requiring the emergency spill slot occurred.
  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // Expansion:
  //   load $vr, FI
  //   copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // Expansion:
  //   copy $vr, ccond
  //   store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  // The virtual register is dead after the store, hence isKill = true.
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // Expansion (RegSize is the size in bytes of each half):
  //   load $vr0, FI
  //   copy lo, $vr0
  //   load $vr1, FI + RegSize
  //   copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // Expansion (RegSize is the size in bytes of each half):
  //   mflo $vr0, src
  //   store $vr0, FI
  //   mfhi $vr1, src
  //   store $vr1, FI + RegSize

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  // The kill flag of the original source goes on the *last* read (the mfhi).
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

bool
ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  // Only COPYs whose source is an accumulator need expansion; anything else
  // is left for the normal copy lowering.
  unsigned Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // Expansion:
  //   mflo $vr0, src
  //   copy dst_lo, $vr0
  //   mfhi $vr1, src
  //   copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  // Each half of the destination accumulator is half its total size.
  unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  // The source's kill flag goes on the last read (the mfhi).
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when ABI is fpxx and mthc1 is not
/// available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
267 bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB, 268 MachineBasicBlock::iterator I, 269 bool FP64) const { 270 // For fpxx and when mthc1 is not available, use: 271 // spill + reload via ldc1 272 // 273 // The case where dmtc1 is available doesn't need to be handled here 274 // because it never creates a BuildPairF64 node. 275 // 276 // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence 277 // for odd-numbered double precision values (because the lower 32-bits is 278 // transferred with mtc1 which is redirected to the upper half of the even 279 // register). Unfortunately, we have to make this decision before register 280 // allocation so for now we use a spill/reload sequence for all 281 // double-precision values in regardless of being an odd/even register. 282 if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) || 283 (FP64 && !Subtarget.useOddSPReg())) { 284 unsigned DstReg = I->getOperand(0).getReg(); 285 unsigned LoReg = I->getOperand(1).getReg(); 286 unsigned HiReg = I->getOperand(2).getReg(); 287 288 // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are 289 // the cases where mthc1 is not available). 64-bit architectures and 290 // MIPS32r2 or later can use FGR64 though. 291 assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() || 292 !Subtarget.isFP64bit()); 293 294 const TargetRegisterClass *RC = &Mips::GPR32RegClass; 295 const TargetRegisterClass *RC2 = 296 FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass; 297 298 // We re-use the same spill slot each time so that the stack frame doesn't 299 // grow too much in functions with a large number of moves. 
300 int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2); 301 if (!Subtarget.isLittle()) 302 std::swap(LoReg, HiReg); 303 TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC, 304 &RegInfo, 0); 305 TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC, 306 &RegInfo, 4); 307 TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0); 308 return true; 309 } 310 311 return false; 312 } 313 314 /// This method expands the same instruction that MipsSEInstrInfo:: 315 /// expandExtractElementF64 does, for the case when ABI is fpxx and mfhc1 is not 316 /// available and the case where the ABI is FP64A. It is implemented here 317 /// because frame indexes are eliminated before MipsSEInstrInfo:: 318 /// expandExtractElementF64 is called. 319 bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB, 320 MachineBasicBlock::iterator I, 321 bool FP64) const { 322 const MachineOperand &Op1 = I->getOperand(1); 323 const MachineOperand &Op2 = I->getOperand(2); 324 325 if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) { 326 unsigned DstReg = I->getOperand(0).getReg(); 327 BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg); 328 return true; 329 } 330 331 // For fpxx and when mfhc1 is not available, use: 332 // spill + reload via ldc1 333 // 334 // The case where dmfc1 is available doesn't need to be handled here 335 // because it never creates a ExtractElementF64 node. 336 // 337 // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence 338 // for odd-numbered double precision values (because the lower 32-bits is 339 // transferred with mfc1 which is redirected to the upper half of the even 340 // register). Unfortunately, we have to make this decision before register 341 // allocation so for now we use a spill/reload sequence for all 342 // double-precision values in regardless of being an odd/even register. 
343 344 if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) || 345 (FP64 && !Subtarget.useOddSPReg())) { 346 unsigned DstReg = I->getOperand(0).getReg(); 347 unsigned SrcReg = Op1.getReg(); 348 unsigned N = Op2.getImm(); 349 int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N)); 350 351 // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are 352 // the cases where mfhc1 is not available). 64-bit architectures and 353 // MIPS32r2 or later can use FGR64 though. 354 assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() || 355 !Subtarget.isFP64bit()); 356 357 const TargetRegisterClass *RC = 358 FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass; 359 const TargetRegisterClass *RC2 = &Mips::GPR32RegClass; 360 361 // We re-use the same spill slot each time so that the stack frame doesn't 362 // grow too much in functions with a large number of moves. 363 int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC); 364 TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0); 365 TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset); 366 return true; 367 } 368 369 return false; 370 } 371 372 MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI) 373 : MipsFrameLowering(STI, STI.stackAlignment()) {} 374 375 void MipsSEFrameLowering::emitPrologue(MachineFunction &MF, 376 MachineBasicBlock &MBB) const { 377 assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported"); 378 MachineFrameInfo *MFI = MF.getFrameInfo(); 379 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); 380 381 const MipsSEInstrInfo &TII = 382 *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo()); 383 const MipsRegisterInfo &RegInfo = 384 *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo()); 385 386 MachineBasicBlock::iterator MBBI = MBB.begin(); 387 DebugLoc dl; 388 MipsABIInfo ABI = STI.getABI(); 389 unsigned SP = ABI.GetStackPtr(); 390 unsigned FP = ABI.GetFramePtr(); 391 unsigned ZERO = ABI.GetNullPtr(); 392 
unsigned MOVE = ABI.GetGPRMoveOp(); 393 unsigned ADDiu = ABI.GetPtrAddiuOp(); 394 unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND; 395 396 const TargetRegisterClass *RC = ABI.ArePtrs64bit() ? 397 &Mips::GPR64RegClass : &Mips::GPR32RegClass; 398 399 // First, compute final stack size. 400 uint64_t StackSize = MFI->getStackSize(); 401 402 // No need to allocate space on the stack. 403 if (StackSize == 0 && !MFI->adjustsStack()) return; 404 405 MachineModuleInfo &MMI = MF.getMMI(); 406 const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); 407 MachineLocation DstML, SrcML; 408 409 // Adjust stack. 410 TII.adjustStackPtr(SP, -StackSize, MBB, MBBI); 411 412 // emit ".cfi_def_cfa_offset StackSize" 413 unsigned CFIIndex = MMI.addFrameInst( 414 MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize)); 415 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 416 .addCFIIndex(CFIIndex); 417 418 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo(); 419 420 if (CSI.size()) { 421 // Find the instruction past the last instruction that saves a callee-saved 422 // register to the stack. 423 for (unsigned i = 0; i < CSI.size(); ++i) 424 ++MBBI; 425 426 // Iterate over list of callee-saved registers and emit .cfi_offset 427 // directives. 428 for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(), 429 E = CSI.end(); I != E; ++I) { 430 int64_t Offset = MFI->getObjectOffset(I->getFrameIdx()); 431 unsigned Reg = I->getReg(); 432 433 // If Reg is a double precision register, emit two cfa_offsets, 434 // one for each of the paired single precision registers. 
435 if (Mips::AFGR64RegClass.contains(Reg)) { 436 unsigned Reg0 = 437 MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true); 438 unsigned Reg1 = 439 MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true); 440 441 if (!STI.isLittle()) 442 std::swap(Reg0, Reg1); 443 444 unsigned CFIIndex = MMI.addFrameInst( 445 MCCFIInstruction::createOffset(nullptr, Reg0, Offset)); 446 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 447 .addCFIIndex(CFIIndex); 448 449 CFIIndex = MMI.addFrameInst( 450 MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4)); 451 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 452 .addCFIIndex(CFIIndex); 453 } else if (Mips::FGR64RegClass.contains(Reg)) { 454 unsigned Reg0 = MRI->getDwarfRegNum(Reg, true); 455 unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1; 456 457 if (!STI.isLittle()) 458 std::swap(Reg0, Reg1); 459 460 unsigned CFIIndex = MMI.addFrameInst( 461 MCCFIInstruction::createOffset(nullptr, Reg0, Offset)); 462 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 463 .addCFIIndex(CFIIndex); 464 465 CFIIndex = MMI.addFrameInst( 466 MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4)); 467 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 468 .addCFIIndex(CFIIndex); 469 } else { 470 // Reg is either in GPR32 or FGR32. 471 unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset( 472 nullptr, MRI->getDwarfRegNum(Reg, 1), Offset)); 473 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 474 .addCFIIndex(CFIIndex); 475 } 476 } 477 } 478 479 if (MipsFI->callsEhReturn()) { 480 // Insert instructions that spill eh data registers. 481 for (int I = 0; I < 4; ++I) { 482 if (!MBB.isLiveIn(ABI.GetEhDataReg(I))) 483 MBB.addLiveIn(ABI.GetEhDataReg(I)); 484 TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false, 485 MipsFI->getEhDataRegFI(I), RC, &RegInfo); 486 } 487 488 // Emit .cfi_offset directives for eh data registers. 
489 for (int I = 0; I < 4; ++I) { 490 int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I)); 491 unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true); 492 unsigned CFIIndex = MMI.addFrameInst( 493 MCCFIInstruction::createOffset(nullptr, Reg, Offset)); 494 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 495 .addCFIIndex(CFIIndex); 496 } 497 } 498 499 // if framepointer enabled, set it to point to the stack pointer. 500 if (hasFP(MF)) { 501 // Insert instruction "move $fp, $sp" at this location. 502 BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO) 503 .setMIFlag(MachineInstr::FrameSetup); 504 505 // emit ".cfi_def_cfa_register $fp" 506 unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister( 507 nullptr, MRI->getDwarfRegNum(FP, true))); 508 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) 509 .addCFIIndex(CFIIndex); 510 511 if (RegInfo.needsStackRealignment(MF)) { 512 // addiu $Reg, $zero, -MaxAlignment 513 // andi $sp, $sp, $Reg 514 unsigned VR = MF.getRegInfo().createVirtualRegister(RC); 515 assert(isInt<16>(MFI->getMaxAlignment()) && 516 "Function's alignment size requirement is not supported."); 517 int MaxAlign = - (signed) MFI->getMaxAlignment(); 518 519 BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO) .addImm(MaxAlign); 520 BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR); 521 522 if (hasBP(MF)) { 523 // move $s7, $sp 524 unsigned BP = STI.isABI_N64() ? 
Mips::S7_64 : Mips::S7; 525 BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP) 526 .addReg(SP) 527 .addReg(ZERO); 528 } 529 } 530 } 531 } 532 533 void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF, 534 MachineBasicBlock &MBB) const { 535 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); 536 MachineFrameInfo *MFI = MF.getFrameInfo(); 537 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); 538 539 const MipsSEInstrInfo &TII = 540 *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo()); 541 const MipsRegisterInfo &RegInfo = 542 *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo()); 543 544 DebugLoc dl = MBBI->getDebugLoc(); 545 MipsABIInfo ABI = STI.getABI(); 546 unsigned SP = ABI.GetStackPtr(); 547 unsigned FP = ABI.GetFramePtr(); 548 unsigned ZERO = ABI.GetNullPtr(); 549 unsigned MOVE = ABI.GetGPRMoveOp(); 550 551 // if framepointer enabled, restore the stack pointer. 552 if (hasFP(MF)) { 553 // Find the first instruction that restores a callee-saved register. 554 MachineBasicBlock::iterator I = MBBI; 555 556 for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i) 557 --I; 558 559 // Insert instruction "move $sp, $fp" at this location. 560 BuildMI(MBB, I, dl, TII.get(MOVE), SP).addReg(FP).addReg(ZERO); 561 } 562 563 if (MipsFI->callsEhReturn()) { 564 const TargetRegisterClass *RC = 565 ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass; 566 567 // Find first instruction that restores a callee-saved register. 568 MachineBasicBlock::iterator I = MBBI; 569 for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i) 570 --I; 571 572 // Insert instructions that restore eh data registers. 573 for (int J = 0; J < 4; ++J) { 574 TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J), 575 MipsFI->getEhDataRegFI(J), RC, &RegInfo); 576 } 577 } 578 579 // Get the number of bytes from FrameInfo 580 uint64_t StackSize = MFI->getStackSize(); 581 582 if (!StackSize) 583 return; 584 585 // Adjust stack. 
586 TII.adjustStackPtr(SP, StackSize, MBB, MBBI); 587 } 588 589 bool MipsSEFrameLowering:: 590 spillCalleeSavedRegisters(MachineBasicBlock &MBB, 591 MachineBasicBlock::iterator MI, 592 const std::vector<CalleeSavedInfo> &CSI, 593 const TargetRegisterInfo *TRI) const { 594 MachineFunction *MF = MBB.getParent(); 595 MachineBasicBlock *EntryBlock = &MF->front(); 596 const TargetInstrInfo &TII = *STI.getInstrInfo(); 597 598 for (unsigned i = 0, e = CSI.size(); i != e; ++i) { 599 // Add the callee-saved register as live-in. Do not add if the register is 600 // RA and return address is taken, because it has already been added in 601 // method MipsTargetLowering::LowerRETURNADDR. 602 // It's killed at the spill, unless the register is RA and return address 603 // is taken. 604 unsigned Reg = CSI[i].getReg(); 605 bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64) 606 && MF->getFrameInfo()->isReturnAddressTaken(); 607 if (!IsRAAndRetAddrIsTaken) 608 EntryBlock->addLiveIn(Reg); 609 610 // Insert the spill to the stack frame. 611 bool IsKill = !IsRAAndRetAddrIsTaken; 612 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); 613 TII.storeRegToStackSlot(*EntryBlock, MI, Reg, IsKill, 614 CSI[i].getFrameIdx(), RC, TRI); 615 } 616 617 return true; 618 } 619 620 bool 621 MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { 622 const MachineFrameInfo *MFI = MF.getFrameInfo(); 623 624 // Reserve call frame if the size of the maximum call frame fits into 16-bit 625 // immediate field and there are no variable sized objects on the stack. 626 // Make sure the second register scavenger spill slot can be accessed with one 627 // instruction. 628 return isInt<16>(MFI->getMaxCallFrameSize() + getStackAlignment()) && 629 !MFI->hasVarSizedObjects(); 630 } 631 632 /// Mark \p Reg and all registers aliasing it in the bitset. 
// Mark \p Reg and every register aliasing it (super-, sub- and overlapping
// registers, plus Reg itself) in \p SavedRegs.
static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
                         unsigned Reg) {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
    SavedRegs.set(*AI);
}

// Decide which callee-saved registers must be spilled, create the frame
// indexes this target needs (eh_return data regs, emergency scavenging
// slots), and run the ExpandPseudo rewrite of accumulator/CCond/F64 pseudos.
void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MipsABIInfo ABI = STI.getABI();
  unsigned FP = ABI.GetFramePtr();
  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;

  // Mark $fp as used if function has dedicated frame pointer.
  if (hasFP(MF))
    setAliasRegs(MF, SavedRegs, FP);
  // Mark $s7 as used if function has dedicated base pointer.
  if (hasBP(MF))
    setAliasRegs(MF, SavedRegs, BP);

  // Create spill slots for eh data registers if function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI();

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If target is
    // mips64, it should be 64-bit, otherwise it should be 32-bit.
    const TargetRegisterClass *RC = STI.hasMips64() ?
      &Mips::GPR64RegClass : &Mips::GPR32RegClass;
    int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                  RC->getAlignment(), false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set scavenging frame index if necessary: if the worst-case SP-relative
  // offset does not fit in a signed 16-bit immediate, frame accesses will
  // need a scavenged register to materialize addresses.
  uint64_t MaxSPOffset = MF.getInfo<MipsFunctionInfo>()->getIncomingArgSize() +
    estimateStackSize(MF);

  if (isInt<16>(MaxSPOffset))
    return;

  const TargetRegisterClass *RC =
      ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment(), false);
  RS->addScavengingFrameIndex(FI);
}

// Factory used by the Mips subtarget to select the Mips32/64 (non-MIPS16)
// frame lowering implementation.  Ownership passes to the caller.
const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}