1 //===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-====// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file contains the AArch64 implementation of TargetFrameLowering class. 11 // 12 // On AArch64, stack frames are structured as follows: 13 // 14 // The stack grows downward. 15 // 16 // All of the individual frame areas on the frame below are optional, i.e. it's 17 // possible to create a function so that the particular area isn't present 18 // in the frame. 19 // 20 // At function entry, the "frame" looks as follows: 21 // 22 // | | Higher address 23 // |-----------------------------------| 24 // | | 25 // | arguments passed on the stack | 26 // | | 27 // |-----------------------------------| <- sp 28 // | | Lower address 29 // 30 // 31 // After the prologue has run, the frame has the following general structure. 32 // Note that this doesn't depict the case where a red-zone is used. Also, 33 // technically the last frame area (VLAs) doesn't get created until in the 34 // main function body, after the prologue is run. However, it's depicted here 35 // for completeness. 36 // 37 // | | Higher address 38 // |-----------------------------------| 39 // | | 40 // | arguments passed on the stack | 41 // | | 42 // |-----------------------------------| 43 // | | 44 // | (Win64 only) varargs from reg | 45 // | | 46 // |-----------------------------------| 47 // | | 48 // | prev_fp, prev_lr | 49 // | (a.k.a. 
"frame record") | 50 // |-----------------------------------| <- fp(=x29) 51 // | | 52 // | other callee-saved registers | 53 // | | 54 // |-----------------------------------| 55 // |.empty.space.to.make.part.below....| 56 // |.aligned.in.case.it.needs.more.than| (size of this area is unknown at 57 // |.the.standard.16-byte.alignment....| compile time; if present) 58 // |-----------------------------------| 59 // | | 60 // | local variables of fixed size | 61 // | including spill slots | 62 // |-----------------------------------| <- bp(not defined by ABI, 63 // |.variable-sized.local.variables....| LLVM chooses X19) 64 // |.(VLAs)............................| (size of this area is unknown at 65 // |...................................| compile time) 66 // |-----------------------------------| <- sp 67 // | | Lower address 68 // 69 // 70 // To access the data in a frame, at-compile time, a constant offset must be 71 // computable from one of the pointers (fp, bp, sp) to access it. The size 72 // of the areas with a dotted background cannot be computed at compile-time 73 // if they are present, making it required to have all three of fp, bp and 74 // sp to be set up to be able to access all contents in the frame areas, 75 // assuming all of the frame areas are non-empty. 76 // 77 // For most functions, some of the frame areas are empty. For those functions, 78 // it may not be necessary to set up fp or bp: 79 // * A base pointer is definitely needed when there are both VLAs and local 80 // variables with more-than-default alignment requirements. 81 // * A frame pointer is definitely needed when there are local variables with 82 // more-than-default alignment requirements. 83 // 84 // In some cases when a base pointer is not strictly needed, it is generated 85 // anyway when offsets from the frame pointer to access local variables become 86 // so large that the offset can't be encoded in the immediate fields of loads 87 // or stores. 
88 // 89 // FIXME: also explain the redzone concept. 90 // FIXME: also explain the concept of reserved call frames. 91 // 92 //===----------------------------------------------------------------------===// 93 94 #include "AArch64FrameLowering.h" 95 #include "AArch64InstrInfo.h" 96 #include "AArch64MachineFunctionInfo.h" 97 #include "AArch64RegisterInfo.h" 98 #include "AArch64Subtarget.h" 99 #include "AArch64TargetMachine.h" 100 #include "MCTargetDesc/AArch64AddressingModes.h" 101 #include "llvm/ADT/SmallVector.h" 102 #include "llvm/ADT/Statistic.h" 103 #include "llvm/CodeGen/LivePhysRegs.h" 104 #include "llvm/CodeGen/MachineBasicBlock.h" 105 #include "llvm/CodeGen/MachineFrameInfo.h" 106 #include "llvm/CodeGen/MachineFunction.h" 107 #include "llvm/CodeGen/MachineInstr.h" 108 #include "llvm/CodeGen/MachineInstrBuilder.h" 109 #include "llvm/CodeGen/MachineMemOperand.h" 110 #include "llvm/CodeGen/MachineModuleInfo.h" 111 #include "llvm/CodeGen/MachineOperand.h" 112 #include "llvm/CodeGen/MachineRegisterInfo.h" 113 #include "llvm/CodeGen/RegisterScavenging.h" 114 #include "llvm/CodeGen/TargetInstrInfo.h" 115 #include "llvm/CodeGen/TargetRegisterInfo.h" 116 #include "llvm/CodeGen/TargetSubtargetInfo.h" 117 #include "llvm/IR/Attributes.h" 118 #include "llvm/IR/CallingConv.h" 119 #include "llvm/IR/DataLayout.h" 120 #include "llvm/IR/DebugLoc.h" 121 #include "llvm/IR/Function.h" 122 #include "llvm/MC/MCDwarf.h" 123 #include "llvm/Support/CommandLine.h" 124 #include "llvm/Support/Debug.h" 125 #include "llvm/Support/ErrorHandling.h" 126 #include "llvm/Support/MathExtras.h" 127 #include "llvm/Support/raw_ostream.h" 128 #include "llvm/Target/TargetMachine.h" 129 #include "llvm/Target/TargetOptions.h" 130 #include <cassert> 131 #include <cstdint> 132 #include <iterator> 133 #include <vector> 134 135 using namespace llvm; 136 137 #define DEBUG_TYPE "frame-info" 138 139 static cl::opt<bool> EnableRedZone("aarch64-redzone", 140 cl::desc("enable use of redzone on AArch64"), 141 
cl::init(false), cl::Hidden); 142 143 static cl::opt<bool> 144 ReverseCSRRestoreSeq("reverse-csr-restore-seq", 145 cl::desc("reverse the CSR restore sequence"), 146 cl::init(false), cl::Hidden); 147 148 STATISTIC(NumRedZoneFunctions, "Number of functions using red zone"); 149 150 /// This is the biggest offset to the stack pointer we can encode in aarch64 151 /// instructions (without using a separate calculation and a temp register). 152 /// Note that the exception here are vector stores/loads which cannot encode any 153 /// displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()). 154 static const unsigned DefaultSafeSPDisplacement = 255; 155 156 /// Look at each instruction that references stack frames and return the stack 157 /// size limit beyond which some of these instructions will require a scratch 158 /// register during their expansion later. 159 static unsigned estimateRSStackSizeLimit(MachineFunction &MF) { 160 // FIXME: For now, just conservatively guestimate based on unscaled indexing 161 // range. We'll end up allocating an unnecessary spill slot a lot, but 162 // realistically that's not a big deal at this stage of the game. 163 for (MachineBasicBlock &MBB : MF) { 164 for (MachineInstr &MI : MBB) { 165 if (MI.isDebugValue() || MI.isPseudo() || 166 MI.getOpcode() == AArch64::ADDXri || 167 MI.getOpcode() == AArch64::ADDSXri) 168 continue; 169 170 for (const MachineOperand &MO : MI.operands()) { 171 if (!MO.isFI()) 172 continue; 173 174 int Offset = 0; 175 if (isAArch64FrameOffsetLegal(MI, Offset, nullptr, nullptr, nullptr) == 176 AArch64FrameOffsetCannotUpdate) 177 return 0; 178 } 179 } 180 } 181 return DefaultSafeSPDisplacement; 182 } 183 184 bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const { 185 if (!EnableRedZone) 186 return false; 187 // Don't use the red zone if the function explicitly asks us not to. 188 // This is typically used for kernel code. 
189 if (MF.getFunction().hasFnAttribute(Attribute::NoRedZone)) 190 return false; 191 192 const MachineFrameInfo &MFI = MF.getFrameInfo(); 193 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 194 unsigned NumBytes = AFI->getLocalStackSize(); 195 196 return !(MFI.hasCalls() || hasFP(MF) || NumBytes > 128); 197 } 198 199 /// hasFP - Return true if the specified function should have a dedicated frame 200 /// pointer register. 201 bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const { 202 const MachineFrameInfo &MFI = MF.getFrameInfo(); 203 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo(); 204 // Retain behavior of always omitting the FP for leaf functions when possible. 205 if (MFI.hasCalls() && MF.getTarget().Options.DisableFramePointerElim(MF)) 206 return true; 207 if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() || 208 MFI.hasStackMap() || MFI.hasPatchPoint() || 209 RegInfo->needsStackRealignment(MF)) 210 return true; 211 // With large callframes around we may need to use FP to access the scavenging 212 // emergency spillslot. 213 // 214 // Unfortunately some calls to hasFP() like machine verifier -> 215 // getReservedReg() -> hasFP in the middle of global isel are too early 216 // to know the max call frame size. Hopefully conservatively returning "true" 217 // in those cases is fine. 218 // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs. 219 if (!MFI.isMaxCallFrameSizeComputed() || 220 MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement) 221 return true; 222 223 return false; 224 } 225 226 /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is 227 /// not required, we reserve argument space for call sites in the function 228 /// immediately on entry to the current function. This eliminates the need for 229 /// add/sub sp brackets around call sites. Returns true if the call frame is 230 /// included as part of the stack frame. 
231 bool 232 AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { 233 return !MF.getFrameInfo().hasVarSizedObjects(); 234 } 235 236 MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr( 237 MachineFunction &MF, MachineBasicBlock &MBB, 238 MachineBasicBlock::iterator I) const { 239 const AArch64InstrInfo *TII = 240 static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo()); 241 DebugLoc DL = I->getDebugLoc(); 242 unsigned Opc = I->getOpcode(); 243 bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode(); 244 uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0; 245 246 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); 247 if (!TFI->hasReservedCallFrame(MF)) { 248 unsigned Align = getStackAlignment(); 249 250 int64_t Amount = I->getOperand(0).getImm(); 251 Amount = alignTo(Amount, Align); 252 if (!IsDestroy) 253 Amount = -Amount; 254 255 // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it 256 // doesn't have to pop anything), then the first operand will be zero too so 257 // this adjustment is a no-op. 258 if (CalleePopAmount == 0) { 259 // FIXME: in-function stack adjustment for calls is limited to 24-bits 260 // because there's no guaranteed temporary register available. 261 // 262 // ADD/SUB (immediate) has only LSL #0 and LSL #12 available. 263 // 1) For offset <= 12-bit, we use LSL #0 264 // 2) For 12-bit <= offset <= 24-bit, we use two instructions. One uses 265 // LSL #0, and the other uses LSL #12. 266 // 267 // Most call frames will be allocated at the start of a function so 268 // this is OK, but it is a limitation that needs dealing with. 
269 assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large"); 270 emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, Amount, TII); 271 } 272 } else if (CalleePopAmount != 0) { 273 // If the calling convention demands that the callee pops arguments from the 274 // stack, we want to add it back if we have a reserved call frame. 275 assert(CalleePopAmount < 0xffffff && "call frame too large"); 276 emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, -CalleePopAmount, 277 TII); 278 } 279 return MBB.erase(I); 280 } 281 282 void AArch64FrameLowering::emitCalleeSavedFrameMoves( 283 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { 284 MachineFunction &MF = *MBB.getParent(); 285 MachineFrameInfo &MFI = MF.getFrameInfo(); 286 const TargetSubtargetInfo &STI = MF.getSubtarget(); 287 const MCRegisterInfo *MRI = STI.getRegisterInfo(); 288 const TargetInstrInfo *TII = STI.getInstrInfo(); 289 DebugLoc DL = MBB.findDebugLoc(MBBI); 290 291 // Add callee saved registers to move list. 292 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); 293 if (CSI.empty()) 294 return; 295 296 for (const auto &Info : CSI) { 297 unsigned Reg = Info.getReg(); 298 int64_t Offset = 299 MFI.getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea(); 300 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true); 301 unsigned CFIIndex = MF.addFrameInst( 302 MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset)); 303 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 304 .addCFIIndex(CFIIndex) 305 .setMIFlags(MachineInstr::FrameSetup); 306 } 307 } 308 309 // Find a scratch register that we can use at the start of the prologue to 310 // re-align the stack pointer. We avoid using callee-save registers since they 311 // may appear to be free when this is called from canUseAsPrologue (during 312 // shrink wrapping), but then no longer be free when this is called from 313 // emitPrologue. 
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack pointer,
// but we would then have to make sure that we were in fact saving at least one
// callee-save register in the prologue, which is additional complexity that
// doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
  MachineFunction *MF = MBB->getParent();

  // If MBB is an entry block, use X9 as the scratch register
  if (&MF->front() == MBB)
    return AArch64::X9;

  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo();
  LivePhysRegs LiveRegs(TRI);
  LiveRegs.addLiveIns(*MBB);

  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  // Prefer X9 since it was historically used for the prologue scratch reg.
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  if (LiveRegs.available(MRI, AArch64::X9))
    return AArch64::X9;

  // Otherwise fall back to the first free register in GPR64.
  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }
  // No scratch register is free in this block.
  return AArch64::NoRegister;
}

/// Returns true if this block may be used as a prologue insertion point:
/// either no stack re-alignment is needed, or the block has a free
/// non-callee-save scratch register to perform the re-alignment with.
bool AArch64FrameLowering::canUseAsPrologue(
    const MachineBasicBlock &MBB) const {
  const MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  // Don't need a scratch register if we're not going to re-align the stack.
  if (!RegInfo->needsStackRealignment(*MF))
    return true;
  // Otherwise, we can use any block as long as it has a scratch register
  // available.
  return findScratchNonCalleeSaveRegister(TmpMBB) != AArch64::NoRegister;
}

/// Returns true when targeting Windows and the stack size reaches the probe
/// threshold (default 4096 bytes; overridable with the "stack-probe-size"
/// function attribute) and probing is not disabled via "no-stack-arg-probe".
static bool windowsRequiresStackProbe(MachineFunction &MF,
                                      unsigned StackSizeInBytes) {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  if (!Subtarget.isTargetWindows())
    return false;
  const Function &F = MF.getFunction();
  // TODO: When implementing stack protectors, take that into account
  // for the probe threshold.
  unsigned StackProbeSize = 4096;
  if (F.hasFnAttribute("stack-probe-size"))
    F.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  return (StackSizeInBytes >= StackProbeSize) &&
         !F.hasFnAttribute("no-stack-arg-probe");
}

/// Decide whether the SP adjustment for the callee-save area and the one for
/// the local stack can be folded into a single SP update of StackBumpBytes.
bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
    MachineFunction &MF, unsigned StackBumpBytes) const {
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  // Nothing to combine when there is no local stack at all.
  if (AFI->getLocalStackSize() == 0)
    return false;

  // 512 is the maximum immediate for stp/ldp that will be used for
  // callee-save save/restores
  if (StackBumpBytes >= 512 || windowsRequiresStackProbe(MF, StackBumpBytes))
    return false;

  if (MFI.hasVarSizedObjects())
    return false;

  if (RegInfo->needsStackRealignment(MF))
    return false;

  // This isn't strictly necessary, but it simplifies things a bit since the
  // current RedZone handling code assumes the SP is adjusted by the
  // callee-save save/restore code.
  if (canUseRedZone(MF))
    return false;

  return true;
}

// Convert callee-save register save/restore instruction to do stack pointer
// decrement/increment to allocate/deallocate the callee-save stack area by
// converting store/load to use pre/post increment version.
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc) {
  // Map the plain offset opcode to its pre-indexed (stores) or post-indexed
  // (loads) writeback form. The single-register STR/LDR forms take unscaled
  // immediates in their writeback variants.
  unsigned NewOpc;
  bool NewIsUnscaled = false;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    NewIsUnscaled = true;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    NewIsUnscaled = true;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    NewIsUnscaled = true;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    NewIsUnscaled = true;
    break;
  }

  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  // The writeback form additionally defines the updated base register (SP).
  MIB.addReg(AArch64::SP, RegState::Define);

  // Copy all operands other than the immediate offset.
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is immediate offset that needs fixing.
  assert(CSStackSizeInc % 8 == 0);
  int64_t CSStackSizeIncImm = CSStackSizeInc;
  // Scaled forms encode the offset in 8-byte units.
  if (!NewIsUnscaled)
    CSStackSizeIncImm /= 8;
  MIB.addImm(CSStackSizeIncImm);

  MIB.setMIFlags(MBBI->getFlags());
  MIB.setMemRefs(MBBI->memoperands_begin(), MBBI->memoperands_end());

  // Erase the original instruction and return an iterator to the replacement.
  return std::prev(MBB.erase(MBBI));
}

// Fixup callee-save register save/restore instructions to take into account
// combined SP bump by adding the local stack size to the stack offsets.
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
                                              unsigned LocalStackSize) {
  unsigned Opc = MI.getOpcode();
  // Opc is only consumed by the assert; the cast avoids an unused-variable
  // warning in builds where asserts are compiled out.
  (void)Opc;
  assert((Opc == AArch64::STPXi || Opc == AArch64::STPDi ||
          Opc == AArch64::STRXui || Opc == AArch64::STRDui ||
          Opc == AArch64::LDPXi || Opc == AArch64::LDPDi ||
          Opc == AArch64::LDRXui || Opc == AArch64::LDRDui) &&
         "Unexpected callee-save save/restore opcode!");

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes have scaled offsets.
  assert(LocalStackSize % 8 == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / 8);
}

/// Emit the function prologue: allocate the stack frame, thread SP updates
/// through the callee-save spills, set up FP/BP when needed, and emit CFI
/// directives describing the frame.
void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &F = MF.getFunction();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() || F.needsUnwindTableEntry();
  bool HasFP = hasFP(MF);

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  int NumBytes = (int)MFI.getStackSize();
  if (!AFI->hasStackFrame() && !windowsRequiresStackProbe(MF, NumBytes)) {
    assert(!HasFP && "unexpected function without stack frame but with FP");

    // All of the stack allocation is for locals.
    AFI->setLocalStackSize(NumBytes);

    if (!NumBytes)
      return;
    // REDZONE: If the stack size is less than 128 bytes, we don't need
    // to actually allocate.
    if (canUseRedZone(MF)) {
      AFI->setHasRedZone(true);
      ++NumRedZoneFunctions;
    } else {
      AFI->setHasRedZone(false);
      emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
                      MachineInstr::FrameSetup);

      // Label used to tie together the PROLOG_LABEL and the MachineMoves.
      MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }
    return;
  }

  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  // Win64 varargs spilled from registers occupy a fixed, 16-byte-aligned
  // object at the top of the frame.
  unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;

  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // All of the remaining stack allocations are for locals.
  AFI->setLocalStackSize(NumBytes - PrologueSaveSize);

  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  if (CombineSPBump) {
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
                    MachineInstr::FrameSetup);
    NumBytes = 0;
  } else if (PrologueSaveSize != 0) {
    // Fold the callee-save area allocation into the first spill by turning it
    // into a pre-indexed store.
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(MBB, MBBI, DL, TII,
                                                     -PrologueSaveSize);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Move past the saves of the callee-saved registers, fixing up the offsets
  // and pre-inc if we decided to combine the callee-save and local stack
  // pointer bump above.
  MachineBasicBlock::iterator End = MBB.end();
  while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup)) {
    if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize());
    ++MBBI;
  }
  if (HasFP) {
    // Only set up FP if we actually need to. Frame pointer is fp =
    // sp - fixedobject - 16.
    int FPOffset = AFI->getCalleeSavedStackSize() - 16;
    if (CombineSPBump)
      FPOffset += AFI->getLocalStackSize();

    // Issue    sub fp, sp, FPOffset or
    //          mov fp,sp          when FPOffset is zero.
    // Note: All stores of callee-saved registers are marked as "FrameSetup".
    // This code marks the instruction(s) that set the FP also.
    emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, FPOffset, TII,
                    MachineInstr::FrameSetup);
  }

  if (windowsRequiresStackProbe(MF, NumBytes)) {
    // __chkstk takes the allocation size in X15 as a count of 16-byte units;
    // the SUBXrx64 below scales X15 back up by 16 (UXTX #4).
    uint32_t NumWords = NumBytes >> 4;

    BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), AArch64::X15)
        .addImm(NumWords)
        .setMIFlags(MachineInstr::FrameSetup);

    switch (MF.getTarget().getCodeModel()) {
    case CodeModel::Small:
    case CodeModel::Medium:
    case CodeModel::Kernel:
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL))
          .addExternalSymbol("__chkstk")
          .addReg(AArch64::X15, RegState::Implicit)
          .setMIFlags(MachineInstr::FrameSetup);
      break;
    case CodeModel::Large:
      // The large code model cannot reach __chkstk with a BL; materialize its
      // address in X16 and call through the register.
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVaddrEXT))
          .addReg(AArch64::X16, RegState::Define)
          .addExternalSymbol("__chkstk")
          .addExternalSymbol("__chkstk")
          .setMIFlags(MachineInstr::FrameSetup);

      BuildMI(MBB, MBBI, DL, TII->get(AArch64::BLR))
          .addReg(AArch64::X16, RegState::Kill)
          .addReg(AArch64::X15, RegState::Implicit | RegState::Define)
          .setMIFlags(MachineInstr::FrameSetup);
      break;
    }

    BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBXrx64), AArch64::SP)
        .addReg(AArch64::SP, RegState::Kill)
        .addReg(AArch64::X15, RegState::Kill)
        .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 4))
        .setMIFlags(MachineInstr::FrameSetup);
    NumBytes = 0;
  }

  // Allocate space for the rest of the frame.
  if (NumBytes) {
    const bool NeedsRealignment = RegInfo->needsStackRealignment(MF);
    unsigned scratchSPReg = AArch64::SP;

    if (NeedsRealignment) {
      scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
      assert(scratchSPReg != AArch64::NoRegister);
    }

    // If we're a leaf function, try using the red zone.
    if (!canUseRedZone(MF))
      // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
      // the correct value here, as NumBytes also includes padding bytes,
      // which shouldn't be counted here.
      emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, -NumBytes, TII,
                      MachineInstr::FrameSetup);

    if (NeedsRealignment) {
      const unsigned Alignment = MFI.getMaxAlignment();
      const unsigned NrBitsToZero = countTrailingZeros(Alignment);
      assert(NrBitsToZero > 1);
      assert(scratchSPReg != AArch64::SP);

      // SUB X9, SP, NumBytes
      //   -- X9 is temporary register, so shouldn't contain any live data here,
      //   -- free to use. This is already produced by emitFrameOffset above.
      // AND SP, X9, 0b11111...0000
      // The logical immediates have a non-trivial encoding. The following
      // formula computes the encoded immediate with all ones but
      // NrBitsToZero zero bits as least significant bits.
      uint32_t andMaskEncoded = (1 << 12)                         // = N
                                | ((64 - NrBitsToZero) << 6)      // immr
                                | ((64 - NrBitsToZero - 1) << 0); // imms

      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
          .addReg(scratchSPReg, RegState::Kill)
          .addImm(andMaskEncoded);
      AFI->setStackRealigned(true);
    }
  }

  // If we need a base pointer, set it up here. It's whatever the value of the
  // stack pointer is at this point. Any variable size objects will be allocated
  // after this, so we can still use the base pointer to reference locals.
  //
  // FIXME: Clarify FrameSetup flags here.
  // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
  // needed.
  if (RegInfo->hasBasePointer(MF)) {
    TII->copyPhysReg(MBB, MBBI, DL, RegInfo->getBaseRegister(), AArch64::SP,
                     false);
  }

  if (needsFrameMoves) {
    const DataLayout &TD = MF.getDataLayout();
    const int StackGrowth = -TD.getPointerSize(0);
    unsigned FramePtr = RegInfo->getFrameRegister(MF);
    // An example of the prologue:
    //
    //     .globl __foo
    //     .align 2
    //  __foo:
    // Ltmp0:
    //     .cfi_startproc
    //     .cfi_personality 155, ___gxx_personality_v0
    // Leh_func_begin:
    //     .cfi_lsda 16, Lexception33
    //
    //     stp  xa,bx, [sp, -#offset]!
    //     ...
    //     stp  x28, x27, [sp, #offset-32]
    //     stp  fp, lr, [sp, #offset-16]
    //     add  fp, sp, #offset - 16
    //     sub  sp, sp, #1360
    //
    // The Stack:
    //       +-------------------------------------------+
    // 10000 | ........ | ........ | ........ | ........ |
    // 10004 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10008 | ........ | ........ | ........ | ........ |
    // 1000c | ........ | ........ | ........ | ........ |
    //       +===========================================+
    // 10010 |                X28 Register               |
    // 10014 |                X28 Register               |
    //       +-------------------------------------------+
    // 10018 |                X27 Register               |
    // 1001c |                X27 Register               |
    //       +===========================================+
    // 10020 |                Frame Pointer              |
    // 10024 |                Frame Pointer              |
    //       +-------------------------------------------+
    // 10028 |                Link Register              |
    // 1002c |                Link Register              |
    //       +===========================================+
    // 10030 | ........ | ........ | ........ | ........ |
    // 10034 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10038 | ........ | ........ | ........ | ........ |
    // 1003c | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    //
    //     [sp] = 10030        ::    >>initial value<<
    //     sp = 10020          ::  stp fp, lr, [sp, #-16]!
    //     fp = sp == 10020    ::  mov fp, sp
    //     [sp] == 10020       ::  stp x28, x27, [sp, #-16]!
    //     sp == 10010         ::  >>final value<<
    //
    // The frame pointer (w29) points to address 10020. If we use an offset of
    // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
    // for w27, and -32 for w28:
    //
    //  Ltmp1:
    //     .cfi_def_cfa w29, 16
    //  Ltmp2:
    //     .cfi_offset w30, -8
    //  Ltmp3:
    //     .cfi_offset w29, -16
    //  Ltmp4:
    //     .cfi_offset w27, -24
    //  Ltmp5:
    //     .cfi_offset w28, -32

    if (HasFP) {
      // Define the current CFA rule to use the provided FP.
      unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
          nullptr, Reg, 2 * StackGrowth - FixedObject));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    } else {
      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, -MFI.getStackSize()));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    // Now emit the moves for whatever callee saved regs we have (including FP,
    // LR if those are saved).
    emitCalleeSavedFrameMoves(MBB, MBBI);
  }
}

/// Generate the function epilogue: undo the stack adjustments made by
/// emitPrologue and, where required (tail calls / callee-pops conventions),
/// also pop the incoming stack argument area.
void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL;
  bool IsTailCallReturn = false;
  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    unsigned RetOpcode = MBBI->getOpcode();
    IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
                       RetOpcode == AArch64::TCRETURNri;
  }
  // Total frame size the prologue allocated.
  int NumBytes = MFI.getStackSize();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Initial and residual are named for consistency with the prologue. Note that
  // in the epilogue, the residual adjustment is executed first.
  uint64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    // Operand 1 of a TCRETURN* node carries the stack adjustment computed by
    // LowerCall.
    MachineOperand &StackAdjust = MBBI->getOperand(1);

    // For a tail-call in a callee-pops-arguments environment, some or all of
    // the stack may actually be in use for the call's arguments, this is
    // calculated during LowerCall and consumed here...
    ArgumentPopSize = StackAdjust.getImm();
  } else {
    // ... otherwise the amount to pop is *all* of the argument space,
    // conveniently stored in the MachineFunctionInfo by
    // LowerFormalArguments. This will, of course, be zero for the C calling
    // convention.
    ArgumentPopSize = AFI->getArgumentStackToRestore();
  }

  // The stack frame should be like below,
  //
  //      ----------------------                     ---
  //      |                    |                      |
  //      | BytesInStackArgArea|              CalleeArgStackSize
  //      | (NumReusableBytes) |                (of tail call)
  //      |                    |                     ---
  //      |                    |                      |
  //      ---------------------|        ---           |
  //      |                    |         |            |
  //      |   CalleeSavedReg   |         |            |
  //      | (CalleeSavedStackSize)|      |            |
  //      |                    |         |            |
  //      ---------------------|         |         NumBytes
  //      |                    |     StackSize  (StackAdjustUp)
  //      |   LocalStackSize   |         |            |
  //      | (covering callee   |         |            |
  //      |       args)        |         |            |
  //      |                    |         |            |
  //      ----------------------        ---          ---
  //
  // So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
  //             = StackSize + ArgumentPopSize
  //
  // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
  // it as the 2nd argument of AArch64ISD::TC_RETURN.

  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  // Win64 reg-varargs are spilled in a fixed, 16-byte-aligned area adjacent
  // to the callee-save area.
  unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;

  uint64_t AfterCSRPopSize = ArgumentPopSize;
  // Size of the SP adjustment performed by the prologue's initial push:
  // callee-save area plus the Win64 varargs spill area.
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  // Assume we can't combine the last pop with the sp restore.

  if (!CombineSPBump && PrologueSaveSize != 0) {
    MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
    // Converting the last ldp to a post-index ldp is valid only if the last
    // ldp's offset is 0.
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    // If the offset is 0, convert it to a post-index ldp.
    if (OffsetOp.getImm() == 0) {
      convertCalleeSaveRestoreToSPPrePostIncDec(MBB, Pop, DL, TII,
                                                PrologueSaveSize);
    } else {
      // If not, make sure to emit an add after the last ldp.
      // We're doing this by transfering the size to be restored from the
      // adjustment *before* the CSR pops to the adjustment *after* the CSR
      // pops.
      AfterCSRPopSize += PrologueSaveSize;
    }
  }

  // Move past the restores of the callee-saved registers.
  // If we plan on combining the sp bump of the local stack size and the callee
  // save stack size, we might need to adjust the CSR save and restore offsets.
  MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastPopI != Begin) {
    --LastPopI;
    if (!LastPopI->getFlag(MachineInstr::FrameDestroy)) {
      // First non-FrameDestroy instruction found: LastPopI now points at the
      // first callee-save restore.
      ++LastPopI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize());
  }

  // If there is a single SP update, insert it before the ret and we're done.
  if (CombineSPBump) {
    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    NumBytes + AfterCSRPopSize, TII,
                    MachineInstr::FrameDestroy);
    return;
  }

  // From here on, NumBytes is the local-area size only (callee-save area
  // excluded).
  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  if (!hasFP(MF)) {
    bool RedZone = canUseRedZone(MF);
    // If this was a redzone leaf function, we don't need to restore the
    // stack pointer (but we may need to pop stack args for fastcc).
    if (RedZone && AfterCSRPopSize == 0)
      return;

    bool NoCalleeSaveRestore = PrologueSaveSize == 0;
    int StackRestoreBytes = RedZone ? 0 : NumBytes;
    if (NoCalleeSaveRestore)
      StackRestoreBytes += AfterCSRPopSize;
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    StackRestoreBytes, TII, MachineInstr::FrameDestroy);
    // If we were able to combine the local stack pop with the argument pop,
    // then we're done.
    if (NoCalleeSaveRestore || AfterCSRPopSize == 0)
      return;
    NumBytes = 0;
  }

  // Restore the original stack pointer.
  // FIXME: Rather than doing the math here, we should instead just use
  // non-post-indexed loads for the restores if we aren't actually going to
  // be able to save any instructions.
  if (MFI.hasVarSizedObjects() || AFI->isStackRealigned())
    // SP is unreliable here; recompute it from FP. FP points at the frame
    // record, which sits 16 bytes into the callee-save area (see the frame
    // layout diagram at the top of this file), so SP = FP - (CSSize - 16)
    // lands at the bottom of the callee-save area, ready for the pops.
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
                    -AFI->getCalleeSavedStackSize() + 16, TII,
                    MachineInstr::FrameDestroy);
  else if (NumBytes)
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes, TII,
                    MachineInstr::FrameDestroy);

  // This must be placed after the callee-save restore code because that code
  // assumes the SP is at the same location as it was after the callee-save save
  // code in the prologue.
  if (AfterCSRPopSize) {
    // Sometimes (when we restore in the same order as we save), we can end up
    // with code like this:
    //
    // ldp      x26, x25, [sp]
    // ldp      x24, x23, [sp, #16]
    // ldp      x22, x21, [sp, #32]
    // ldp      x20, x19, [sp, #48]
    // add      sp, sp, #64
    //
    // In this case, it is always better to put the first ldp at the end, so
    // that the load-store optimizer can run and merge the ldp and the add into
    // a post-index ldp.
    // If we managed to grab the first pop instruction, move it to the end.
    if (LastPopI != Begin)
      MBB.splice(MBB.getFirstTerminator(), &MBB, LastPopI);
    // We should end up with something like this now:
    //
    // ldp      x24, x23, [sp, #16]
    // ldp      x22, x21, [sp, #32]
    // ldp      x20, x19, [sp, #48]
    // ldp      x26, x25, [sp]
    // add      sp, sp, #64
    //
    // and the load-store optimizer can merge the last two instructions into:
    //
    // ldp      x26, x25, [sp], #64
    //
    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    AfterCSRPopSize, TII, MachineInstr::FrameDestroy);
  }
}

/// getFrameIndexReference - Provide a base+offset reference to an FI slot for
/// debug info.
/// It's the same as what we use for resolving the code-gen
/// references for now.  FIXME: This can go wrong when references are
/// SP-relative and simple call frames aren't used.
int AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                 int FI,
                                                 unsigned &FrameReg) const {
  return resolveFrameIndexReference(MF, FI, FrameReg);
}

/// Resolve frame index \p FI to a base register (written to \p FrameReg) and
/// a byte offset from that register. The base is chosen among FP, the base
/// pointer, and SP; when \p PreferFP is set the FP is favored where either
/// FP or SP would work.
int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
                                                     int FI, unsigned &FrameReg,
                                                     bool PreferFP) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
  // FP-relative offset: the +16 skips the frame record (prev FP, LR) that the
  // FP points at; FixedObject additionally skips the Win64 reg-vararg spill
  // area (see the frame layout diagram at the top of this file).
  int FPOffset = MFI.getObjectOffset(FI) + FixedObject + 16;
  // SP-relative offset, assuming SP was fully adjusted by the prologue.
  int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize();
  bool isFixed = MFI.isFixedObjectIndex(FI);

  // Use frame pointer to reference fixed objects. Use it for locals if
  // there are VLAs or a dynamically realigned SP (and thus the SP isn't
  // reliable as a base). Make sure useFPForScavengingIndex() does the
  // right thing for the emergency spill slot.
  bool UseFP = false;
  if (AFI->hasStackFrame()) {
    // Note: Keeping the following as multiple 'if' statements rather than
    // merging to a single expression for readability.
    //
    // Argument access should always use the FP.
    if (isFixed) {
      UseFP = hasFP(MF);
    } else if (hasFP(MF) && !RegInfo->hasBasePointer(MF) &&
               !RegInfo->needsStackRealignment(MF)) {
      // Use SP or FP, whichever gives us the best chance of the offset
      // being in range for direct access. If the FPOffset is positive,
      // that'll always be best, as the SP will be even further away.
      // If the FPOffset is negative, we have to keep in mind that the
      // available offset range for negative offsets is smaller than for
      // positive ones. If we have variable sized objects, we're stuck with
      // using the FP regardless, though, as the SP offset is unknown
      // and we don't have a base pointer available. If an offset is
      // available via the FP and the SP, use whichever is closest.
      if (PreferFP || MFI.hasVarSizedObjects() || FPOffset >= 0 ||
          (FPOffset >= -256 && Offset > -FPOffset))
        UseFP = true;
    }
  }

  assert((isFixed || !RegInfo->needsStackRealignment(MF) || !UseFP) &&
         "In the presence of dynamic stack pointer realignment, "
         "non-argument objects cannot be accessed through the frame pointer");

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    return FPOffset;
  }

  // Use the base pointer if we have one.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else {
    FrameReg = AArch64::SP;
    // If we're using the red zone for this function, the SP won't actually
    // be adjusted, so the offsets will be negative. They're also all
    // within range of the signed 9-bit immediate instructions.
    if (canUseRedZone(MF))
      Offset -= AFI->getLocalStackSize();
  }

  return Offset;
}

/// Return the kill-flag reg state for \p Reg when it is spilled in the
/// prologue.
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
  // Do not set a kill flag on values that are also marked as live-in. This
  // happens with the @llvm-returnaddress intrinsic and with arguments passed in
  // callee saved registers.
  // Omitting the kill flags is conservatively correct even if the live-in
  // is not used after all.
  bool IsLiveIn = MF.getRegInfo().isLiveIn(Reg);
  return getKillRegState(!IsLiveIn);
}

/// Return true when this function's unwind information is emitted in MachO's
/// compact unwind format, which requires callee-saves to be stored as
/// adjacent register pairs. Functions using swifterror are excluded.
static bool produceCompactUnwindFrame(MachineFunction &MF) {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  AttributeList Attrs = MF.getFunction().getAttributes();
  return Subtarget.isTargetMachO() &&
         !(Subtarget.getTargetLowering()->supportSwiftError() &&
           Attrs.hasAttrSomewhere(Attribute::SwiftError));
}

namespace {

/// Describes one callee-save store/restore: either a single register or a
/// register pair handled by one STP/LDP.
struct RegPairInfo {
  unsigned Reg1 = AArch64::NoRegister; // First (or only) register.
  unsigned Reg2 = AArch64::NoRegister; // Second register; NoRegister if unpaired.
  int FrameIdx;                        // Frame index of Reg1's slot.
  int Offset;                          // SP offset in 8-byte units (STP/LDP scaled imm).
  bool IsGPR;                          // GPR64 pair if true, FPR64 pair otherwise.

  RegPairInfo() = default;

  bool isPaired() const { return Reg2 != AArch64::NoRegister; }
};

} // end anonymous namespace

/// Group the callee-saved registers in \p CSI into RegPairInfo entries
/// (pairing adjacent same-class registers for STP/LDP) and compute each
/// entry's scaled offset within the callee-save area.
static void computeCalleeSaveRegisterPairs(
    MachineFunction &MF, const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs) {

  if (CSI.empty())
    return;

  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  unsigned Count = CSI.size();
  (void)CC;
  // MachO's compact unwind format relies on all registers being stored in
  // pairs.
  assert((!produceCompactUnwindFrame(MF) ||
          CC == CallingConv::PreserveMost ||
          (Count & 1) == 0) &&
         "Odd number of callee-saved regs to spill!");
  // Offsets are assigned top-down from the total callee-save area size.
  int Offset = AFI->getCalleeSavedStackSize();

  for (unsigned i = 0; i < Count; ++i) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    assert(AArch64::GPR64RegClass.contains(RPI.Reg1) ||
           AArch64::FPR64RegClass.contains(RPI.Reg1));
    RPI.IsGPR = AArch64::GPR64RegClass.contains(RPI.Reg1);

    // Add the next reg to the pair if it is in the same register class.
    if (i + 1 < Count) {
      unsigned NextReg = CSI[i + 1].getReg();
      if ((RPI.IsGPR && AArch64::GPR64RegClass.contains(NextReg)) ||
          (!RPI.IsGPR && AArch64::FPR64RegClass.contains(NextReg)))
        RPI.Reg2 = NextReg;
    }

    // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
    // list to come in sorted by frame index so that we can issue the store
    // pair instructions directly. Assert if we see anything otherwise.
    //
    // The order of the registers in the list is controlled by
    // getCalleeSavedRegs(), so they will always be in-order, as well.
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx())) &&
           "Out of order callee saved regs!");

    // MachO's compact unwind format relies on all registers being stored in
    // adjacent register pairs.
    assert((!produceCompactUnwindFrame(MF) ||
            CC == CallingConv::PreserveMost ||
            (RPI.isPaired() &&
             ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
              RPI.Reg1 + 1 == RPI.Reg2))) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();

    if (Count * 8 != AFI->getCalleeSavedStackSize() && !RPI.isPaired()) {
      // Round up size of non-pair to pair size if we need to pad the
      // callee-save area to ensure 16-byte alignment.
      Offset -= 16;
      assert(MFI.getObjectAlignment(RPI.FrameIdx) <= 16);
      MFI.setObjectAlignment(RPI.FrameIdx, 16);
      AFI->setCalleeSaveStackHasFreeSpace(true);
    } else
      Offset -= RPI.isPaired() ? 16 : 8;
    assert(Offset % 8 == 0);
    // Store the offset pre-scaled by 8 (the STP/LDP immediate unit).
    RPI.Offset = Offset / 8;
    assert((RPI.Offset >= -64 && RPI.Offset <= 63) &&
           "Offset out of bounds for LDP/STP immediate");

    RegPairs.push_back(RPI);
    if (RPI.isPaired())
      ++i; // Skip the second register of the pair.
  }
}

/// Emit the callee-save stores (STP/STR) before \p MI, using offsets
/// precomputed by computeCalleeSaveRegisterPairs.
bool AArch64FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Pairs are emitted in reverse so the lowest-offset store comes first.
  for (auto RPII = RegPairs.rbegin(), RPIE = RegPairs.rend(); RPII != RPIE;
       ++RPII) {
    RegPairInfo RPI = *RPII;
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned StrOpc;

    // Issue sequence of spills for cs regs. The first spill may be converted
    // to a pre-decrement store later by emitPrologue if the callee-save stack
    // area allocation can't be combined with the local stack area allocation.
    // For example:
    //    stp     x22, x21, [sp, #0]     // addImm(+0)
    //    stp     x20, x19, [sp, #16]    // addImm(+2)
    //    stp     fp, lr, [sp, #32]      // addImm(+4)
    // Rationale: This sequence saves uop updates compared to a sequence of
    // pre-increment spills like stp xi,xj,[sp,#-16]!
    // Note: Similar rationale and sequence for restores in epilog.
    if (RPI.IsGPR)
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
    else
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
    DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
          if (RPI.isPaired())
            dbgs() << ", " << printReg(Reg2, TRI);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx+1;
          dbgs() << ")\n");

    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
    // Reserved registers are implicitly live; only mark others live-in.
    if (!MRI.isReserved(Reg1))
      MBB.addLiveIn(Reg1);
    if (RPI.isPaired()) {
      if (!MRI.isReserved(Reg2))
        MBB.addLiveIn(Reg2);
      MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
          MachineMemOperand::MOStore, 8, 8));
    }
    MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*8], where factor*8 is implicit
        .setMIFlag(MachineInstr::FrameSetup);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
        MachineMemOperand::MOStore, 8, 8));
  }
  return true;
}

/// Emit the callee-save restores (LDP/LDR) before \p MI, mirroring
/// spillCalleeSavedRegisters.
bool AArch64FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);

  // Emits one restore (LDP or LDR) for the given pair entry.
  auto EmitMI = [&](const RegPairInfo &RPI) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;

    // Issue sequence of restores for cs regs. The last restore may be converted
    // to a post-increment load later by emitEpilogue if the callee-save stack
    // area allocation can't be combined with the local stack area allocation.
    // For example:
    //    ldp     fp, lr, [sp, #32]       // addImm(+4)
    //    ldp     x20, x19, [sp, #16]     // addImm(+2)
    //    ldp     x22, x21, [sp, #0]      // addImm(+0)
    // Note: see comment in spillCalleeSavedRegisters()
    unsigned LdrOpc;
    if (RPI.IsGPR)
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
    else
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
    DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
          if (RPI.isPaired())
            dbgs() << ", " << printReg(Reg2, TRI);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx+1;
          dbgs() << ")\n");

    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
    if (RPI.isPaired()) {
      MIB.addReg(Reg2, getDefRegState(true));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
          MachineMemOperand::MOLoad, 8, 8));
    }
    MIB.addReg(Reg1, getDefRegState(true))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*8] where the factor*8 is implicit
        .setMIFlag(MachineInstr::FrameDestroy);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
        MachineMemOperand::MOLoad, 8, 8));
  };

  if (ReverseCSRRestoreSeq)
    for (const RegPairInfo &RPI : reverse(RegPairs))
      EmitMI(RPI);
  else
    for (const RegPairInfo &RPI : RegPairs)
      EmitMI(RPI);
  return true;
}

/// Decide which callee-saved registers this function must spill. May add
/// registers beyond what the generic pass computed (FP/LR for the frame
/// record, the base pointer, pair partners for compact unwind, an extra
/// scratch register or an emergency spill slot for big frames) and records
/// the rounded callee-save area size in AArch64FunctionInfo.
void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                BitVector &SavedRegs,
                                                RegScavenger *RS) const {
  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;

  MachineFrameInfo &MFI = MF.getFrameInfo();
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);

  unsigned BasePointerReg = RegInfo->hasBasePointer(MF)
                                ? RegInfo->getBaseRegister()
                                : (unsigned)AArch64::NoRegister;

  // Estimate the number of spilled registers (for the stack-size estimate
  // below) before committing to any extra saves.
  unsigned SpillEstimate = SavedRegs.count();
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    // i ^ 1 selects the adjacent CSRegs entry, treated as Reg's save-pair
    // partner.
    unsigned PairedReg = CSRegs[i ^ 1];
    if (Reg == BasePointerReg)
      SpillEstimate++;
    if (produceCompactUnwindFrame(MF) && !SavedRegs.test(PairedReg))
      SpillEstimate++;
  }
  SpillEstimate += 2; // Conservatively include FP+LR in the estimate
  unsigned StackEstimate = MFI.estimateStackSize(MF) + 8 * SpillEstimate;

  // The frame record needs to be created by saving the appropriate registers
  if (hasFP(MF) || windowsRequiresStackProbe(MF, StackEstimate)) {
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);
  }

  unsigned ExtraCSSpill = 0;
  // Figure out which callee-saved registers to save/restore.
  for (unsigned i = 0; CSRegs[i]; ++i) {
    const unsigned Reg = CSRegs[i];

    // Add the base pointer register to SavedRegs if it is callee-save.
    if (Reg == BasePointerReg)
      SavedRegs.set(Reg);

    bool RegUsed = SavedRegs.test(Reg);
    unsigned PairedReg = CSRegs[i ^ 1];
    if (!RegUsed) {
      // Remember an unspilled, non-reserved GPR so it can serve as a scratch
      // register for big frames below.
      if (AArch64::GPR64RegClass.contains(Reg) &&
          !RegInfo->isReservedReg(MF, Reg)) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
      continue;
    }

    // MachO's compact unwind format relies on all registers being stored in
    // pairs.
    // FIXME: the usual format is actually better if unwinding isn't needed.
    if (produceCompactUnwindFrame(MF) && !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !RegInfo->isReservedReg(MF, PairedReg))
        ExtraCSSpill = PairedReg;
    }
  }

  DEBUG(dbgs() << "*** determineCalleeSaves\nUsed CSRs:";
        for (unsigned Reg : SavedRegs.set_bits())
          dbgs() << ' ' << printReg(Reg, RegInfo);
        dbgs() << "\n";);

  // If any callee-saved registers are used, the frame cannot be eliminated.
  unsigned NumRegsSpilled = SavedRegs.count();
  bool CanEliminateFrame = NumRegsSpilled == 0;

  // The CSR spill slots have not been allocated yet, so estimateStackSize
  // won't include them.
  unsigned CFSize = MFI.estimateStackSize(MF) + 8 * NumRegsSpilled;
  DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
  unsigned EstimatedStackSizeLimit = estimateRSStackSizeLimit(MF);
  bool BigStack = (CFSize > EstimatedStackSizeLimit);
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);

  // Estimate if we might need to scavenge a register at some point in order
  // to materialize a stack offset. If so, either spill one additional
  // callee-saved register or reserve a special spill slot to facilitate
  // register scavenging. If we already spilled an extra callee-saved register
  // above to keep the number of spills even, we don't need to do anything else
  // here.
  if (BigStack) {
    if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
      DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
                   << " to get a scratch register.\n");
      SavedRegs.set(UnspilledCSGPR);
      // MachO's compact unwind format relies on all registers being stored in
      // pairs, so if we need to spill one extra for BigStack, then we need to
      // store the pair.
      if (produceCompactUnwindFrame(MF))
        SavedRegs.set(UnspilledCSGPRPaired);
      // NOTE(review): this records the *paired* register as the extra spill
      // even though only UnspilledCSGPR is guaranteed to be set above (the
      // pair is only added for compact unwind) — verify this is intended.
      ExtraCSSpill = UnspilledCSGPRPaired;
      NumRegsSpilled = SavedRegs.count();
    }

    // If we didn't find an extra callee-saved register to spill, create
    // an emergency spill slot.
    if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) {
      const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
      const TargetRegisterClass &RC = AArch64::GPR64RegClass;
      unsigned Size = TRI->getSpillSize(RC);
      unsigned Align = TRI->getSpillAlignment(RC);
      int FI = MFI.CreateStackObject(Size, Align, false);
      RS->addScavengingFrameIndex(FI);
      DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                   << " as the emergency spill slot.\n");
    }
  }

  // Round up to register pair alignment to avoid additional SP adjustment
  // instructions.
  AFI->setCalleeSavedStackSize(alignTo(8 * NumRegsSpilled, 16));
}

/// Allow the frame lowering to reuse padding in the callee-save area as
/// scavenging spill space when such free space exists.
bool AArch64FrameLowering::enableStackSlotScavenging(
    const MachineFunction &MF) const {
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  return AFI->hasCalleeSaveStackFreeSpace();
}