//===-- MipsSEFrameLowering.cpp - Mips32/64 Frame Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

namespace {
typedef MachineBasicBlock::iterator Iter;

static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

/// Helper class to expand pseudos.
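/// Accumulator and DSP control loads, stores and copies, as well as the
/// BuildPairF64 and ExtractElementF64 pseudos in the FPXX and FP64A cases,
/// are rewritten into real instructions here (see expandInstr for the full
/// list of opcodes handled).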
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};
}

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (MachineFunction::iterator BB = MF.begin(), BBEnd = MF.end();
       BB != BBEnd; ++BB)
    for (Iter I = BB->begin(), End = BB->end(); I != End;)
      Expanded |= expandInstr(*BB, I++);

  return Expanded;
}

bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch(I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  unsigned VR = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  unsigned Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  unsigned VR0 = MRI.createVirtualRegister(RC);
  unsigned VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when the ABI is fpxx and mthc1 is not
/// available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For fpxx and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether the register is odd or even.
  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned LoReg = I->getOperand(1).getReg();
    unsigned HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
    if (!Subtarget.isLittle())
      std::swap(LoReg, HiReg);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
                        &RegInfo, 4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
    return true;
  }

  return false;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the case when the ABI is fpxx and mfhc1
/// is not available and the case where the ABI is FP64A. It is implemented
/// here because frame indexes are eliminated before MipsSEInstrInfo::
/// expandExtractElementF64 is called.
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  // For fpxx and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether the register is odd or even.

  if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
      (FP64 && !Subtarget.useOddSPReg())) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned SrcReg = I->getOperand(1).getReg();
    unsigned N = I->getOperand(2).getImm();
    int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
    TII.storeRegToStack(MBB, I, SrcReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.stackAlignment()) {}

void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl;
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();
  unsigned ADDiu = ABI.GetPtrAddiuOp();
  unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;

  const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
      &Mips::GPR64RegClass : &Mips::GPR32RegClass;

  // First, compute final stack size.
  uint64_t StackSize = MFI->getStackSize();

  // No need to allocate space on the stack.
  if (StackSize == 0 && !MFI->adjustsStack()) return;

  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  MachineLocation DstML, SrcML;

  // Adjust stack.
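  // adjustStackPtr emits a single (d)addiu when -StackSize fits in a signed
  // 16-bit immediate; for larger frames MipsSEInstrInfo::adjustStackPtr first
  // materializes the offset in a scratch register.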
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex = MMI.addFrameInst(
      MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();

  if (CSI.size()) {
    // Find the instruction past the last instruction that saves a callee-saved
    // register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over list of callee-saved registers and emit .cfi_offset
    // directives.
    for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
           E = CSI.end(); I != E; ++I) {
      int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
      unsigned Reg = I->getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MMI.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
        MBB.addLiveIn(ABI.GetEhDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo);
    }

    // Emit .cfi_offset directives for eh data registers.
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If the frame pointer is enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
    BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    if (RegInfo.needsStackRealignment(MF)) {
      // addiu $Reg, $zero, -MaxAlignment
      // and   $sp, $sp, $Reg
      unsigned VR = MF.getRegInfo().createVirtualRegister(RC);
      assert(isInt<16>(MFI->getMaxAlignment()) &&
             "Function's alignment size requirement is not supported.");
      int MaxAlign = -(signed)MFI->getMaxAlignment();

      BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO).addImm(MaxAlign);
      BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);

      if (hasBP(MF)) {
        // move $s7, $sp
        unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
        BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP)
            .addReg(SP)
            .addReg(ZERO);
      }
    }
  }
}

void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  DebugLoc dl = MBBI->getDebugLoc();
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();

  // If the frame pointer is enabled, restore the stack pointer.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, dl, TII.get(MOVE), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC =
        ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
                               MipsFI->getEhDataRegFI(J), RC, &RegInfo);
    }
  }

  // Get the number of bytes from FrameInfo.
  uint64_t StackSize = MFI->getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

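/// Spill the callee-saved registers listed in CSI to their assigned frame
/// indexes in the function's entry block.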
bool MipsSEFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *EntryBlock = MF->begin();
  const TargetInstrInfo &TII = *STI.getInstrInfo();

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    // Add the callee-saved register as live-in. Do not add if the register is
    // RA and the return address is taken, because it has already been added in
    // method MipsTargetLowering::LowerRETURNADDR.
    // It's killed at the spill, unless the register is RA and the return
    // address is taken.
    unsigned Reg = CSI[i].getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo()->isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      EntryBlock->addLiveIn(Reg);

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(*EntryBlock, MI, Reg, IsKill,
                            CSI[i].getFrameIdx(), RC, TRI);
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Reserve the call frame if the size of the maximum call frame fits into the
  // 16-bit immediate field and there are no variable sized objects on the
  // stack. Make sure the second register scavenger spill slot can be accessed
  // with one instruction.
  return isInt<16>(MFI->getMaxCallFrameSize() + getStackAlignment()) &&
         !MFI->hasVarSizedObjects();
}

/// Mark \p Reg and all registers aliasing it in the bitset.
static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
                         unsigned Reg) {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
    SavedRegs.set(*AI);
}

void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MipsABIInfo ABI = STI.getABI();
  unsigned FP = ABI.GetFramePtr();
  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;

  // Mark $fp as used if the function has a dedicated frame pointer.
  if (hasFP(MF))
    setAliasRegs(MF, SavedRegs, FP);
  // Mark $s7 as used if the function has a dedicated base pointer.
  if (hasBP(MF))
    setAliasRegs(MF, SavedRegs, BP);

  // Create spill slots for eh data registers if the function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI();

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If the target
    // is mips64, it should be 64-bit, otherwise it should be 32-bit.
    const TargetRegisterClass *RC = STI.hasMips64() ?
        &Mips::GPR64RegClass : &Mips::GPR32RegClass;
    int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                  RC->getAlignment(), false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set scavenging frame index if necessary.
  uint64_t MaxSPOffset = MF.getInfo<MipsFunctionInfo>()->getIncomingArgSize() +
                         estimateStackSize(MF);

  if (isInt<16>(MaxSPOffset))
    return;

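  // Frame offsets that do not fit in a signed 16-bit immediate cannot be
  // encoded directly, so reserve an emergency spill slot that the register
  // scavenger can use when such offsets have to be materialized.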
  const TargetRegisterClass *RC =
      ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment(), false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}