1 //===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-====// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file contains the AArch64 implementation of TargetFrameLowering class. 11 // 12 // On AArch64, stack frames are structured as follows: 13 // 14 // The stack grows downward. 15 // 16 // All of the individual frame areas on the frame below are optional, i.e. it's 17 // possible to create a function so that the particular area isn't present 18 // in the frame. 19 // 20 // At function entry, the "frame" looks as follows: 21 // 22 // | | Higher address 23 // |-----------------------------------| 24 // | | 25 // | arguments passed on the stack | 26 // | | 27 // |-----------------------------------| <- sp 28 // | | Lower address 29 // 30 // 31 // After the prologue has run, the frame has the following general structure. 32 // Note that this doesn't depict the case where a red-zone is used. Also, 33 // technically the last frame area (VLAs) doesn't get created until in the 34 // main function body, after the prologue is run. However, it's depicted here 35 // for completeness. 36 // 37 // | | Higher address 38 // |-----------------------------------| 39 // | | 40 // | arguments passed on the stack | 41 // | | 42 // |-----------------------------------| 43 // | | 44 // | (Win64 only) varargs from reg | 45 // | | 46 // |-----------------------------------| 47 // | | 48 // | prev_fp, prev_lr | 49 // | (a.k.a. 
"frame record") | 50 // |-----------------------------------| <- fp(=x29) 51 // | | 52 // | other callee-saved registers | 53 // | | 54 // |-----------------------------------| 55 // |.empty.space.to.make.part.below....| 56 // |.aligned.in.case.it.needs.more.than| (size of this area is unknown at 57 // |.the.standard.16-byte.alignment....| compile time; if present) 58 // |-----------------------------------| 59 // | | 60 // | local variables of fixed size | 61 // | including spill slots | 62 // |-----------------------------------| <- bp(not defined by ABI, 63 // |.variable-sized.local.variables....| LLVM chooses X19) 64 // |.(VLAs)............................| (size of this area is unknown at 65 // |...................................| compile time) 66 // |-----------------------------------| <- sp 67 // | | Lower address 68 // 69 // 70 // To access the data in a frame, at-compile time, a constant offset must be 71 // computable from one of the pointers (fp, bp, sp) to access it. The size 72 // of the areas with a dotted background cannot be computed at compile-time 73 // if they are present, making it required to have all three of fp, bp and 74 // sp to be set up to be able to access all contents in the frame areas, 75 // assuming all of the frame areas are non-empty. 76 // 77 // For most functions, some of the frame areas are empty. For those functions, 78 // it may not be necessary to set up fp or bp: 79 // * A base pointer is definitely needed when there are both VLAs and local 80 // variables with more-than-default alignment requirements. 81 // * A frame pointer is definitely needed when there are local variables with 82 // more-than-default alignment requirements. 83 // 84 // In some cases when a base pointer is not strictly needed, it is generated 85 // anyway when offsets from the frame pointer to access local variables become 86 // so large that the offset can't be encoded in the immediate fields of loads 87 // or stores. 
88 // 89 // FIXME: also explain the redzone concept. 90 // FIXME: also explain the concept of reserved call frames. 91 // 92 //===----------------------------------------------------------------------===// 93 94 #include "AArch64FrameLowering.h" 95 #include "AArch64InstrInfo.h" 96 #include "AArch64MachineFunctionInfo.h" 97 #include "AArch64RegisterInfo.h" 98 #include "AArch64Subtarget.h" 99 #include "AArch64TargetMachine.h" 100 #include "MCTargetDesc/AArch64AddressingModes.h" 101 #include "llvm/ADT/ScopeExit.h" 102 #include "llvm/ADT/SmallVector.h" 103 #include "llvm/ADT/Statistic.h" 104 #include "llvm/CodeGen/LivePhysRegs.h" 105 #include "llvm/CodeGen/MachineBasicBlock.h" 106 #include "llvm/CodeGen/MachineFrameInfo.h" 107 #include "llvm/CodeGen/MachineFunction.h" 108 #include "llvm/CodeGen/MachineInstr.h" 109 #include "llvm/CodeGen/MachineInstrBuilder.h" 110 #include "llvm/CodeGen/MachineMemOperand.h" 111 #include "llvm/CodeGen/MachineModuleInfo.h" 112 #include "llvm/CodeGen/MachineOperand.h" 113 #include "llvm/CodeGen/MachineRegisterInfo.h" 114 #include "llvm/CodeGen/RegisterScavenging.h" 115 #include "llvm/CodeGen/TargetInstrInfo.h" 116 #include "llvm/CodeGen/TargetRegisterInfo.h" 117 #include "llvm/CodeGen/TargetSubtargetInfo.h" 118 #include "llvm/CodeGen/WinEHFuncInfo.h" 119 #include "llvm/IR/Attributes.h" 120 #include "llvm/IR/CallingConv.h" 121 #include "llvm/IR/DataLayout.h" 122 #include "llvm/IR/DebugLoc.h" 123 #include "llvm/IR/Function.h" 124 #include "llvm/MC/MCAsmInfo.h" 125 #include "llvm/MC/MCDwarf.h" 126 #include "llvm/Support/CommandLine.h" 127 #include "llvm/Support/Debug.h" 128 #include "llvm/Support/ErrorHandling.h" 129 #include "llvm/Support/MathExtras.h" 130 #include "llvm/Support/raw_ostream.h" 131 #include "llvm/Target/TargetMachine.h" 132 #include "llvm/Target/TargetOptions.h" 133 #include <cassert> 134 #include <cstdint> 135 #include <iterator> 136 #include <vector> 137 138 using namespace llvm; 139 140 #define DEBUG_TYPE "frame-info" 141 
142 static cl::opt<bool> EnableRedZone("aarch64-redzone", 143 cl::desc("enable use of redzone on AArch64"), 144 cl::init(false), cl::Hidden); 145 146 static cl::opt<bool> 147 ReverseCSRRestoreSeq("reverse-csr-restore-seq", 148 cl::desc("reverse the CSR restore sequence"), 149 cl::init(false), cl::Hidden); 150 151 STATISTIC(NumRedZoneFunctions, "Number of functions using red zone"); 152 153 /// This is the biggest offset to the stack pointer we can encode in aarch64 154 /// instructions (without using a separate calculation and a temp register). 155 /// Note that the exception here are vector stores/loads which cannot encode any 156 /// displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()). 157 static const unsigned DefaultSafeSPDisplacement = 255; 158 159 /// Look at each instruction that references stack frames and return the stack 160 /// size limit beyond which some of these instructions will require a scratch 161 /// register during their expansion later. 162 static unsigned estimateRSStackSizeLimit(MachineFunction &MF) { 163 // FIXME: For now, just conservatively guestimate based on unscaled indexing 164 // range. We'll end up allocating an unnecessary spill slot a lot, but 165 // realistically that's not a big deal at this stage of the game. 
166 for (MachineBasicBlock &MBB : MF) { 167 for (MachineInstr &MI : MBB) { 168 if (MI.isDebugInstr() || MI.isPseudo() || 169 MI.getOpcode() == AArch64::ADDXri || 170 MI.getOpcode() == AArch64::ADDSXri) 171 continue; 172 173 for (const MachineOperand &MO : MI.operands()) { 174 if (!MO.isFI()) 175 continue; 176 177 int Offset = 0; 178 if (isAArch64FrameOffsetLegal(MI, Offset, nullptr, nullptr, nullptr) == 179 AArch64FrameOffsetCannotUpdate) 180 return 0; 181 } 182 } 183 } 184 return DefaultSafeSPDisplacement; 185 } 186 187 bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const { 188 if (!EnableRedZone) 189 return false; 190 // Don't use the red zone if the function explicitly asks us not to. 191 // This is typically used for kernel code. 192 if (MF.getFunction().hasFnAttribute(Attribute::NoRedZone)) 193 return false; 194 195 const MachineFrameInfo &MFI = MF.getFrameInfo(); 196 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 197 unsigned NumBytes = AFI->getLocalStackSize(); 198 199 return !(MFI.hasCalls() || hasFP(MF) || NumBytes > 128); 200 } 201 202 /// hasFP - Return true if the specified function should have a dedicated frame 203 /// pointer register. 204 bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const { 205 const MachineFrameInfo &MFI = MF.getFrameInfo(); 206 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo(); 207 // Win64 EH requires a frame pointer if funclets are present, as the locals 208 // are accessed off the frame pointer in both the parent function and the 209 // funclets. 210 if (MF.hasEHFunclets()) 211 return true; 212 // Retain behavior of always omitting the FP for leaf functions when possible. 
213 if (MFI.hasCalls() && MF.getTarget().Options.DisableFramePointerElim(MF)) 214 return true; 215 if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() || 216 MFI.hasStackMap() || MFI.hasPatchPoint() || 217 RegInfo->needsStackRealignment(MF)) 218 return true; 219 // With large callframes around we may need to use FP to access the scavenging 220 // emergency spillslot. 221 // 222 // Unfortunately some calls to hasFP() like machine verifier -> 223 // getReservedReg() -> hasFP in the middle of global isel are too early 224 // to know the max call frame size. Hopefully conservatively returning "true" 225 // in those cases is fine. 226 // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs. 227 if (!MFI.isMaxCallFrameSizeComputed() || 228 MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement) 229 return true; 230 231 return false; 232 } 233 234 /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is 235 /// not required, we reserve argument space for call sites in the function 236 /// immediately on entry to the current function. This eliminates the need for 237 /// add/sub sp brackets around call sites. Returns true if the call frame is 238 /// included as part of the stack frame. 239 bool 240 AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { 241 return !MF.getFrameInfo().hasVarSizedObjects(); 242 } 243 244 MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr( 245 MachineFunction &MF, MachineBasicBlock &MBB, 246 MachineBasicBlock::iterator I) const { 247 const AArch64InstrInfo *TII = 248 static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo()); 249 DebugLoc DL = I->getDebugLoc(); 250 unsigned Opc = I->getOpcode(); 251 bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode(); 252 uint64_t CalleePopAmount = IsDestroy ? 
I->getOperand(1).getImm() : 0; 253 254 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); 255 if (!TFI->hasReservedCallFrame(MF)) { 256 unsigned Align = getStackAlignment(); 257 258 int64_t Amount = I->getOperand(0).getImm(); 259 Amount = alignTo(Amount, Align); 260 if (!IsDestroy) 261 Amount = -Amount; 262 263 // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it 264 // doesn't have to pop anything), then the first operand will be zero too so 265 // this adjustment is a no-op. 266 if (CalleePopAmount == 0) { 267 // FIXME: in-function stack adjustment for calls is limited to 24-bits 268 // because there's no guaranteed temporary register available. 269 // 270 // ADD/SUB (immediate) has only LSL #0 and LSL #12 available. 271 // 1) For offset <= 12-bit, we use LSL #0 272 // 2) For 12-bit <= offset <= 24-bit, we use two instructions. One uses 273 // LSL #0, and the other uses LSL #12. 274 // 275 // Most call frames will be allocated at the start of a function so 276 // this is OK, but it is a limitation that needs dealing with. 277 assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large"); 278 emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, Amount, TII); 279 } 280 } else if (CalleePopAmount != 0) { 281 // If the calling convention demands that the callee pops arguments from the 282 // stack, we want to add it back if we have a reserved call frame. 
283 assert(CalleePopAmount < 0xffffff && "call frame too large"); 284 emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, -CalleePopAmount, 285 TII); 286 } 287 return MBB.erase(I); 288 } 289 290 static bool ShouldSignReturnAddress(MachineFunction &MF) { 291 // The function should be signed in the following situations: 292 // - sign-return-address=all 293 // - sign-return-address=non-leaf and the functions spills the LR 294 295 const Function &F = MF.getFunction(); 296 if (!F.hasFnAttribute("sign-return-address")) 297 return false; 298 299 StringRef Scope = F.getFnAttribute("sign-return-address").getValueAsString(); 300 if (Scope.equals("none")) 301 return false; 302 303 if (Scope.equals("all")) 304 return true; 305 306 assert(Scope.equals("non-leaf") && "Expected all, none or non-leaf"); 307 308 for (const auto &Info : MF.getFrameInfo().getCalleeSavedInfo()) 309 if (Info.getReg() == AArch64::LR) 310 return true; 311 312 return false; 313 } 314 315 void AArch64FrameLowering::emitCalleeSavedFrameMoves( 316 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { 317 MachineFunction &MF = *MBB.getParent(); 318 MachineFrameInfo &MFI = MF.getFrameInfo(); 319 const TargetSubtargetInfo &STI = MF.getSubtarget(); 320 const MCRegisterInfo *MRI = STI.getRegisterInfo(); 321 const TargetInstrInfo *TII = STI.getInstrInfo(); 322 DebugLoc DL = MBB.findDebugLoc(MBBI); 323 324 // Add callee saved registers to move list. 
325 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); 326 if (CSI.empty()) 327 return; 328 329 for (const auto &Info : CSI) { 330 unsigned Reg = Info.getReg(); 331 int64_t Offset = 332 MFI.getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea(); 333 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true); 334 unsigned CFIIndex = MF.addFrameInst( 335 MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset)); 336 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 337 .addCFIIndex(CFIIndex) 338 .setMIFlags(MachineInstr::FrameSetup); 339 } 340 } 341 342 // Find a scratch register that we can use at the start of the prologue to 343 // re-align the stack pointer. We avoid using callee-save registers since they 344 // may appear to be free when this is called from canUseAsPrologue (during 345 // shrink wrapping), but then no longer be free when this is called from 346 // emitPrologue. 347 // 348 // FIXME: This is a bit conservative, since in the above case we could use one 349 // of the callee-save registers as a scratch temp to re-align the stack pointer, 350 // but we would then have to make sure that we were in fact saving at least one 351 // callee-save register in the prologue, which is additional complexity that 352 // doesn't seem worth the benefit. 353 static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) { 354 MachineFunction *MF = MBB->getParent(); 355 356 // If MBB is an entry block, use X9 as the scratch register 357 if (&MF->front() == MBB) 358 return AArch64::X9; 359 360 const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>(); 361 const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo(); 362 LivePhysRegs LiveRegs(TRI); 363 LiveRegs.addLiveIns(*MBB); 364 365 // Mark callee saved registers as used so we will not choose them. 
366 const MCPhysReg *CSRegs = MF->getRegInfo().getCalleeSavedRegs(); 367 for (unsigned i = 0; CSRegs[i]; ++i) 368 LiveRegs.addReg(CSRegs[i]); 369 370 // Prefer X9 since it was historically used for the prologue scratch reg. 371 const MachineRegisterInfo &MRI = MF->getRegInfo(); 372 if (LiveRegs.available(MRI, AArch64::X9)) 373 return AArch64::X9; 374 375 for (unsigned Reg : AArch64::GPR64RegClass) { 376 if (LiveRegs.available(MRI, Reg)) 377 return Reg; 378 } 379 return AArch64::NoRegister; 380 } 381 382 bool AArch64FrameLowering::canUseAsPrologue( 383 const MachineBasicBlock &MBB) const { 384 const MachineFunction *MF = MBB.getParent(); 385 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB); 386 const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>(); 387 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 388 389 // Don't need a scratch register if we're not going to re-align the stack. 390 if (!RegInfo->needsStackRealignment(*MF)) 391 return true; 392 // Otherwise, we can use any block as long as it has a scratch register 393 // available. 394 return findScratchNonCalleeSaveRegister(TmpMBB) != AArch64::NoRegister; 395 } 396 397 static bool windowsRequiresStackProbe(MachineFunction &MF, 398 unsigned StackSizeInBytes) { 399 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 400 if (!Subtarget.isTargetWindows()) 401 return false; 402 const Function &F = MF.getFunction(); 403 // TODO: When implementing stack protectors, take that into account 404 // for the probe threshold. 
405 unsigned StackProbeSize = 4096; 406 if (F.hasFnAttribute("stack-probe-size")) 407 F.getFnAttribute("stack-probe-size") 408 .getValueAsString() 409 .getAsInteger(0, StackProbeSize); 410 return (StackSizeInBytes >= StackProbeSize) && 411 !F.hasFnAttribute("no-stack-arg-probe"); 412 } 413 414 bool AArch64FrameLowering::shouldCombineCSRLocalStackBump( 415 MachineFunction &MF, unsigned StackBumpBytes) const { 416 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 417 const MachineFrameInfo &MFI = MF.getFrameInfo(); 418 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 419 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 420 421 if (AFI->getLocalStackSize() == 0) 422 return false; 423 424 // 512 is the maximum immediate for stp/ldp that will be used for 425 // callee-save save/restores 426 if (StackBumpBytes >= 512 || windowsRequiresStackProbe(MF, StackBumpBytes)) 427 return false; 428 429 if (MFI.hasVarSizedObjects()) 430 return false; 431 432 if (RegInfo->needsStackRealignment(MF)) 433 return false; 434 435 // This isn't strictly necessary, but it simplifies things a bit since the 436 // current RedZone handling code assumes the SP is adjusted by the 437 // callee-save save/restore code. 438 if (canUseRedZone(MF)) 439 return false; 440 441 return true; 442 } 443 444 // Given a load or a store instruction, generate an appropriate unwinding SEH 445 // code on Windows. 
446 static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI, 447 const TargetInstrInfo &TII, 448 MachineInstr::MIFlag Flag) { 449 unsigned Opc = MBBI->getOpcode(); 450 MachineBasicBlock *MBB = MBBI->getParent(); 451 MachineFunction &MF = *MBB->getParent(); 452 DebugLoc DL = MBBI->getDebugLoc(); 453 unsigned ImmIdx = MBBI->getNumOperands() - 1; 454 int Imm = MBBI->getOperand(ImmIdx).getImm(); 455 MachineInstrBuilder MIB; 456 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 457 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 458 459 switch (Opc) { 460 default: 461 llvm_unreachable("No SEH Opcode for this instruction"); 462 case AArch64::LDPDpost: 463 Imm = -Imm; 464 LLVM_FALLTHROUGH; 465 case AArch64::STPDpre: { 466 unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 467 unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg()); 468 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X)) 469 .addImm(Reg0) 470 .addImm(Reg1) 471 .addImm(Imm * 8) 472 .setMIFlag(Flag); 473 break; 474 } 475 case AArch64::LDPXpost: 476 Imm = -Imm; 477 LLVM_FALLTHROUGH; 478 case AArch64::STPXpre: { 479 unsigned Reg0 = MBBI->getOperand(1).getReg(); 480 unsigned Reg1 = MBBI->getOperand(2).getReg(); 481 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR) 482 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X)) 483 .addImm(Imm * 8) 484 .setMIFlag(Flag); 485 else 486 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X)) 487 .addImm(RegInfo->getSEHRegNum(Reg0)) 488 .addImm(RegInfo->getSEHRegNum(Reg1)) 489 .addImm(Imm * 8) 490 .setMIFlag(Flag); 491 break; 492 } 493 case AArch64::LDRDpost: 494 Imm = -Imm; 495 LLVM_FALLTHROUGH; 496 case AArch64::STRDpre: { 497 unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 498 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X)) 499 .addImm(Reg) 500 .addImm(Imm) 501 .setMIFlag(Flag); 502 break; 503 } 504 case AArch64::LDRXpost: 505 Imm = -Imm; 506 
LLVM_FALLTHROUGH; 507 case AArch64::STRXpre: { 508 unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 509 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg_X)) 510 .addImm(Reg) 511 .addImm(Imm) 512 .setMIFlag(Flag); 513 break; 514 } 515 case AArch64::STPDi: 516 case AArch64::LDPDi: { 517 unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg()); 518 unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 519 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP)) 520 .addImm(Reg0) 521 .addImm(Reg1) 522 .addImm(Imm * 8) 523 .setMIFlag(Flag); 524 break; 525 } 526 case AArch64::STPXi: 527 case AArch64::LDPXi: { 528 unsigned Reg0 = MBBI->getOperand(0).getReg(); 529 unsigned Reg1 = MBBI->getOperand(1).getReg(); 530 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR) 531 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR)) 532 .addImm(Imm * 8) 533 .setMIFlag(Flag); 534 else 535 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP)) 536 .addImm(RegInfo->getSEHRegNum(Reg0)) 537 .addImm(RegInfo->getSEHRegNum(Reg1)) 538 .addImm(Imm * 8) 539 .setMIFlag(Flag); 540 break; 541 } 542 case AArch64::STRXui: 543 case AArch64::LDRXui: { 544 int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg()); 545 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg)) 546 .addImm(Reg) 547 .addImm(Imm * 8) 548 .setMIFlag(Flag); 549 break; 550 } 551 case AArch64::STRDui: 552 case AArch64::LDRDui: { 553 unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg()); 554 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg)) 555 .addImm(Reg) 556 .addImm(Imm * 8) 557 .setMIFlag(Flag); 558 break; 559 } 560 } 561 auto I = MBB->insertAfter(MBBI, MIB); 562 return I; 563 } 564 565 // Fix up the SEH opcode associated with the save/restore instruction. 
566 static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI, 567 unsigned LocalStackSize) { 568 MachineOperand *ImmOpnd = nullptr; 569 unsigned ImmIdx = MBBI->getNumOperands() - 1; 570 switch (MBBI->getOpcode()) { 571 default: 572 llvm_unreachable("Fix the offset in the SEH instruction"); 573 case AArch64::SEH_SaveFPLR: 574 case AArch64::SEH_SaveRegP: 575 case AArch64::SEH_SaveReg: 576 case AArch64::SEH_SaveFRegP: 577 case AArch64::SEH_SaveFReg: 578 ImmOpnd = &MBBI->getOperand(ImmIdx); 579 break; 580 } 581 if (ImmOpnd) 582 ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize); 583 } 584 585 // Convert callee-save register save/restore instruction to do stack pointer 586 // decrement/increment to allocate/deallocate the callee-save stack area by 587 // converting store/load to use pre/post increment version. 588 static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec( 589 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, 590 const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc, 591 bool NeedsWinCFI, bool InProlog = true) { 592 // Ignore instructions that do not operate on SP, i.e. shadow call stack 593 // instructions. 
594 while (MBBI->getOpcode() == AArch64::STRXpost || 595 MBBI->getOpcode() == AArch64::LDRXpre) { 596 assert(MBBI->getOperand(0).getReg() != AArch64::SP); 597 ++MBBI; 598 } 599 unsigned NewOpc; 600 int Scale = 1; 601 switch (MBBI->getOpcode()) { 602 default: 603 llvm_unreachable("Unexpected callee-save save/restore opcode!"); 604 case AArch64::STPXi: 605 NewOpc = AArch64::STPXpre; 606 Scale = 8; 607 break; 608 case AArch64::STPDi: 609 NewOpc = AArch64::STPDpre; 610 Scale = 8; 611 break; 612 case AArch64::STPQi: 613 NewOpc = AArch64::STPQpre; 614 Scale = 16; 615 break; 616 case AArch64::STRXui: 617 NewOpc = AArch64::STRXpre; 618 break; 619 case AArch64::STRDui: 620 NewOpc = AArch64::STRDpre; 621 break; 622 case AArch64::STRQui: 623 NewOpc = AArch64::STRQpre; 624 break; 625 case AArch64::LDPXi: 626 NewOpc = AArch64::LDPXpost; 627 Scale = 8; 628 break; 629 case AArch64::LDPDi: 630 NewOpc = AArch64::LDPDpost; 631 Scale = 8; 632 break; 633 case AArch64::LDPQi: 634 NewOpc = AArch64::LDPQpost; 635 Scale = 16; 636 break; 637 case AArch64::LDRXui: 638 NewOpc = AArch64::LDRXpost; 639 break; 640 case AArch64::LDRDui: 641 NewOpc = AArch64::LDRDpost; 642 break; 643 case AArch64::LDRQui: 644 NewOpc = AArch64::LDRQpost; 645 break; 646 } 647 // Get rid of the SEH code associated with the old instruction. 648 if (NeedsWinCFI) { 649 auto SEH = std::next(MBBI); 650 if (AArch64InstrInfo::isSEHInstruction(*SEH)) 651 SEH->eraseFromParent(); 652 } 653 654 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc)); 655 MIB.addReg(AArch64::SP, RegState::Define); 656 657 // Copy all operands other than the immediate offset. 
658 unsigned OpndIdx = 0; 659 for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd; 660 ++OpndIdx) 661 MIB.add(MBBI->getOperand(OpndIdx)); 662 663 assert(MBBI->getOperand(OpndIdx).getImm() == 0 && 664 "Unexpected immediate offset in first/last callee-save save/restore " 665 "instruction!"); 666 assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP && 667 "Unexpected base register in callee-save save/restore instruction!"); 668 assert(CSStackSizeInc % Scale == 0); 669 MIB.addImm(CSStackSizeInc / Scale); 670 671 MIB.setMIFlags(MBBI->getFlags()); 672 MIB.setMemRefs(MBBI->memoperands()); 673 674 // Generate a new SEH code that corresponds to the new instruction. 675 if (NeedsWinCFI) 676 InsertSEH(*MIB, *TII, 677 InProlog ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy); 678 679 return std::prev(MBB.erase(MBBI)); 680 } 681 682 // Fixup callee-save register save/restore instructions to take into account 683 // combined SP bump by adding the local stack size to the stack offsets. 684 static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI, 685 unsigned LocalStackSize, 686 bool NeedsWinCFI) { 687 if (AArch64InstrInfo::isSEHInstruction(MI)) 688 return; 689 690 unsigned Opc = MI.getOpcode(); 691 692 // Ignore instructions that do not operate on SP, i.e. shadow call stack 693 // instructions. 
694 if (Opc == AArch64::STRXpost || Opc == AArch64::LDRXpre) { 695 assert(MI.getOperand(0).getReg() != AArch64::SP); 696 return; 697 } 698 699 unsigned Scale; 700 switch (Opc) { 701 case AArch64::STPXi: 702 case AArch64::STRXui: 703 case AArch64::STPDi: 704 case AArch64::STRDui: 705 case AArch64::LDPXi: 706 case AArch64::LDRXui: 707 case AArch64::LDPDi: 708 case AArch64::LDRDui: 709 Scale = 8; 710 break; 711 case AArch64::STPQi: 712 case AArch64::STRQui: 713 case AArch64::LDPQi: 714 case AArch64::LDRQui: 715 Scale = 16; 716 break; 717 default: 718 llvm_unreachable("Unexpected callee-save save/restore opcode!"); 719 } 720 721 unsigned OffsetIdx = MI.getNumExplicitOperands() - 1; 722 assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP && 723 "Unexpected base register in callee-save save/restore instruction!"); 724 // Last operand is immediate offset that needs fixing. 725 MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx); 726 // All generated opcodes have scaled offsets. 727 assert(LocalStackSize % Scale == 0); 728 OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale); 729 730 if (NeedsWinCFI) { 731 auto MBBI = std::next(MachineBasicBlock::iterator(MI)); 732 assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction"); 733 assert(AArch64InstrInfo::isSEHInstruction(*MBBI) && 734 "Expecting a SEH instruction"); 735 fixupSEHOpcode(MBBI, LocalStackSize); 736 } 737 } 738 739 static void adaptForLdStOpt(MachineBasicBlock &MBB, 740 MachineBasicBlock::iterator FirstSPPopI, 741 MachineBasicBlock::iterator LastPopI) { 742 // Sometimes (when we restore in the same order as we save), we can end up 743 // with code like this: 744 // 745 // ldp x26, x25, [sp] 746 // ldp x24, x23, [sp, #16] 747 // ldp x22, x21, [sp, #32] 748 // ldp x20, x19, [sp, #48] 749 // add sp, sp, #64 750 // 751 // In this case, it is always better to put the first ldp at the end, so 752 // that the load-store optimizer can run and merge the ldp and the add into 753 // a 
// post-index ldp.
  // If we managed to grab the first pop instruction, move it to the end.
  if (ReverseCSRRestoreSeq)
    MBB.splice(FirstSPPopI, &MBB, LastPopI);
  // We should end up with something like this now:
  //
  // ldp      x24, x23, [sp, #16]
  // ldp      x22, x21, [sp, #32]
  // ldp      x20, x19, [sp, #48]
  // ldp      x26, x25, [sp]
  // add      sp, sp, #64
  //
  // and the load-store optimizer can merge the last two instructions into:
  //
  // ldp      x26, x25, [sp], #64
  //
}

// Returns true if the return address should be signed with the A-key (as
// opposed to the B-key). If the function carries no
// "sign-return-address-key" attribute, the A-key is the default.
static bool ShouldSignWithAKey(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  if (!F.hasFnAttribute("sign-return-address-key"))
    return true;

  const StringRef Key =
      F.getFnAttribute("sign-return-address-key").getValueAsString();
  assert(Key.equals_lower("a_key") || Key.equals_lower("b_key"));
  return Key.equals_lower("a_key");
}

// Returns true if this function must emit Windows (SEH-style) CFI unwind
// opcodes: the target uses Windows CFI and the function needs an unwind
// table entry.
static bool needsWinCFI(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
         F.needsUnwindTableEntry();
}

void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &F = MF.getFunction();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  // DWARF-style frame moves and Windows CFI are mutually exclusive.
  bool needsFrameMoves = (MMI.hasDebugInfo() || F.needsUnwindTableEntry()) &&
                         !MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool HasFP = hasFP(MF);
  bool NeedsWinCFI = needsWinCFI(MF);
  MF.setHasWinCFI(NeedsWinCFI);
  bool IsFunclet = MBB.isEHFuncletEntry();

  // At this point, we're going to decide whether or not the function uses a
  // redzone. In most cases, the function doesn't have a redzone so let's
  // assume that's false and set it to true in the case that there's a redzone.
  AFI->setHasRedZone(false);

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  if (ShouldSignReturnAddress(MF)) {
    BuildMI(
        MBB, MBBI, DL,
        TII->get(ShouldSignWithAKey(MF) ? AArch64::PACIASP : AArch64::PACIBSP))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // getStackSize() includes all the locals in its size calculation. We don't
  // include these locals when computing the stack size of a funclet, as they
  // are allocated in the parent's stack frame and accessed via the frame
  // pointer from the funclet.  We only save the callee saved registers in the
  // funclet, which are really the callee saved registers of the parent
  // function, including the funclet.
  int NumBytes = IsFunclet ? (int)getWinEHFuncletFrameSize(MF)
                           : (int)MFI.getStackSize();
  if (!AFI->hasStackFrame() && !windowsRequiresStackProbe(MF, NumBytes)) {
    assert(!HasFP && "unexpected function without stack frame but with FP");
    // All of the stack allocation is for locals.
    AFI->setLocalStackSize(NumBytes);
    if (!NumBytes)
      return;
    // REDZONE: If the stack size is less than 128 bytes, we don't need
    // to actually allocate.
    if (canUseRedZone(MF)) {
      AFI->setHasRedZone(true);
      ++NumRedZoneFunctions;
    } else {
      emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
                      MachineInstr::FrameSetup, false, NeedsWinCFI);
      if (!NeedsWinCFI) {
        // Label used to tie together the PROLOG_LABEL and the MachineMoves.
        MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
        // Encode the stack size of the leaf function.
        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
        BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex)
            .setMIFlags(MachineInstr::FrameSetup);
      }
    }

    if (NeedsWinCFI)
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
          .setMIFlag(MachineInstr::FrameSetup);

    return;
  }

  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  // Var args are accounted for in the containing function, so don't
  // include them for funclets.
  unsigned FixedObject = (IsWin64 && !IsFunclet) ?
                         alignTo(AFI->getVarArgsGPRSize(), 16) : 0;

  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // All of the remaining stack allocations are for locals.
  AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  if (CombineSPBump) {
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
                    MachineInstr::FrameSetup, false, NeedsWinCFI);
    NumBytes = 0;
  } else if (PrologueSaveSize != 0) {
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
        MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Move past the saves of the callee-saved registers, fixing up the offsets
  // and pre-inc if we decided to combine the callee-save and local stack
  // pointer bump above.
  MachineBasicBlock::iterator End = MBB.end();
  while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup)) {
    if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize(),
                                        NeedsWinCFI);
    ++MBBI;
  }

  // The code below is not applicable to funclets. We have emitted all the SEH
  // opcodes that we needed to emit.  The FP and BP belong to the containing
  // function.
  if (IsFunclet) {
    if (NeedsWinCFI)
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
          .setMIFlag(MachineInstr::FrameSetup);
    return;
  }

  if (HasFP) {
    // Only set up FP if we actually need to. Frame pointer is fp =
    // sp - fixedobject - 16.
    int FPOffset = AFI->getCalleeSavedStackSize() - 16;
    if (CombineSPBump)
      FPOffset += AFI->getLocalStackSize();

    // Issue    sub fp, sp, FPOffset or
    //          mov fp,sp          when FPOffset is zero.
    // Note: All stores of callee-saved registers are marked as "FrameSetup".
    // This code marks the instruction(s) that set the FP also.
    emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, FPOffset, TII,
                    MachineInstr::FrameSetup, false, NeedsWinCFI);
  }

  if (windowsRequiresStackProbe(MF, NumBytes)) {
    // __chkstk takes the allocation size in 16-byte units in X15.
    uint32_t NumWords = NumBytes >> 4;
    if (NeedsWinCFI) {
      // alloc_l can hold at most 256MB, so assume that NumBytes doesn't
      // exceed this amount.  We need to move at most 2^24 - 1 into x15.
      // This is at most two instructions, MOVZ followed by MOVK.
      // TODO: Fix to use multiple stack alloc unwind codes for stacks
      // exceeding 256MB in size.
      if (NumBytes >= (1 << 28))
        report_fatal_error("Stack size cannot exceed 256MB for stack "
                           "unwinding purposes");

      uint32_t LowNumWords = NumWords & 0xFFFF;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X15)
          .addImm(LowNumWords)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
          .setMIFlag(MachineInstr::FrameSetup);
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
          .setMIFlag(MachineInstr::FrameSetup);
      if ((NumWords & 0xFFFF0000) != 0) {
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15)
            .addReg(AArch64::X15)
            .addImm((NumWords & 0xFFFF0000) >> 16) // High half
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16))
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }
    } else {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), AArch64::X15)
          .addImm(NumWords)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    switch (MF.getTarget().getCodeModel()) {
    case CodeModel::Tiny:
    case CodeModel::Small:
    case CodeModel::Medium:
    case CodeModel::Kernel:
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL))
          .addExternalSymbol("__chkstk")
          .addReg(AArch64::X15, RegState::Implicit)
          .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead)
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI)
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      break;
    case CodeModel::Large:
      // In the large code model the __chkstk address is materialized into a
      // register and called indirectly.
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVaddrEXT))
          .addReg(AArch64::X16, RegState::Define)
          .addExternalSymbol("__chkstk")
          .addExternalSymbol("__chkstk")
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI)
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);

      BuildMI(MBB, MBBI, DL, TII->get(AArch64::BLR))
          .addReg(AArch64::X16, RegState::Kill)
          .addReg(AArch64::X15, RegState::Implicit | RegState::Define)
          .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead)
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI)
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      break;
    }

    // SP -= NumWords * 16 (X15 holds the size in 16-byte units).
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBXrx64), AArch64::SP)
        .addReg(AArch64::SP, RegState::Kill)
        .addReg(AArch64::X15, RegState::Kill)
        .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 4))
        .setMIFlags(MachineInstr::FrameSetup);
    if (NeedsWinCFI)
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
          .addImm(NumBytes)
          .setMIFlag(MachineInstr::FrameSetup);
    NumBytes = 0;
  }

  // Allocate space for the rest of the frame.
  if (NumBytes) {
    const bool NeedsRealignment = RegInfo->needsStackRealignment(MF);
    unsigned scratchSPReg = AArch64::SP;

    if (NeedsRealignment) {
      scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
      assert(scratchSPReg != AArch64::NoRegister);
    }

    // If we're a leaf function, try using the red zone.
    if (!canUseRedZone(MF))
      // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
      // the correct value here, as NumBytes also includes padding bytes,
      // which shouldn't be counted here.
      emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, -NumBytes, TII,
                      MachineInstr::FrameSetup, false, NeedsWinCFI);

    if (NeedsRealignment) {
      const unsigned Alignment = MFI.getMaxAlignment();
      const unsigned NrBitsToZero = countTrailingZeros(Alignment);
      assert(NrBitsToZero > 1);
      assert(scratchSPReg != AArch64::SP);

      // SUB X9, SP, NumBytes
      //   -- X9 is temporary register, so shouldn't contain any live data here,
      //   -- free to use. This is already produced by emitFrameOffset above.
      // AND SP, X9, 0b11111...0000
      // The logical immediates have a non-trivial encoding. The following
      // formula computes the encoded immediate with all ones but
      // NrBitsToZero zero bits as least significant bits.
      uint32_t andMaskEncoded = (1 << 12)                         // = N
                                | ((64 - NrBitsToZero) << 6)      // immr
                                | ((64 - NrBitsToZero - 1) << 0); // imms

      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
          .addReg(scratchSPReg, RegState::Kill)
          .addImm(andMaskEncoded);
      AFI->setStackRealigned(true);
      if (NeedsWinCFI)
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
            .addImm(NumBytes & andMaskEncoded)
            .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  // If we need a base pointer, set it up here. It's whatever the value of the
  // stack pointer is at this point. Any variable size objects will be allocated
  // after this, so we can still use the base pointer to reference locals.
  //
  // FIXME: Clarify FrameSetup flags here.
  // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
  // needed.
  if (RegInfo->hasBasePointer(MF)) {
    TII->copyPhysReg(MBB, MBBI, DL, RegInfo->getBaseRegister(), AArch64::SP,
                     false);
    if (NeedsWinCFI)
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
          .setMIFlag(MachineInstr::FrameSetup);
  }

  // The very last FrameSetup instruction indicates the end of prologue. Emit a
  // SEH opcode indicating the prologue end.
  if (NeedsWinCFI)
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
        .setMIFlag(MachineInstr::FrameSetup);

  if (needsFrameMoves) {
    const DataLayout &TD = MF.getDataLayout();
    const int StackGrowth = -TD.getPointerSize(0);
    unsigned FramePtr = RegInfo->getFrameRegister(MF);
    // An example of the prologue:
    //
    //     .globl __foo
    //     .align 2
    //  __foo:
    // Ltmp0:
    //     .cfi_startproc
    //     .cfi_personality 155, ___gxx_personality_v0
    // Leh_func_begin:
    //     .cfi_lsda 16, Lexception33
    //
    //     stp  xa,bx, [sp, -#offset]!
    //     ...
    //     stp  x28, x27, [sp, #offset-32]
    //     stp  fp, lr, [sp, #offset-16]
    //     add  fp, sp, #offset - 16
    //     sub  sp, sp, #1360
    //
    // The Stack:
    //       +-------------------------------------------+
    // 10000 | ........ | ........ | ........ | ........ |
    // 10004 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10008 | ........ | ........ | ........ | ........ |
    // 1000c | ........ | ........ | ........ | ........ |
    //       +===========================================+
    // 10010 |                X28 Register               |
    // 10014 |                X28 Register               |
    //       +-------------------------------------------+
    // 10018 |                X27 Register               |
    // 1001c |                X27 Register               |
    //       +===========================================+
    // 10020 |                Frame Pointer              |
    // 10024 |                Frame Pointer              |
    //       +-------------------------------------------+
    // 10028 |                Link Register              |
    // 1002c |                Link Register              |
    //       +===========================================+
    // 10030 | ........ | ........ | ........ | ........ |
    // 10034 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10038 | ........ | ........ | ........ | ........ |
    // 1003c | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    //
    //     [sp] = 10030        ::    >>initial value<<
    //     sp = 10020          ::  stp fp, lr, [sp, #-16]!
    //     fp = sp == 10020    ::  mov fp, sp
    //     [sp] == 10020       ::  stp x28, x27, [sp, #-16]!
    //     sp == 10010         ::    >>final value<<
    //
    // The frame pointer (w29) points to address 10020. If we use an offset of
    // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
    // for w27, and -32 for w28:
    //
    //  Ltmp1:
    //     .cfi_def_cfa w29, 16
    //  Ltmp2:
    //     .cfi_offset w30, -8
    //  Ltmp3:
    //     .cfi_offset w29, -16
    //  Ltmp4:
    //     .cfi_offset w27, -24
    //  Ltmp5:
    //     .cfi_offset w28, -32

    if (HasFP) {
      // Define the current CFA rule to use the provided FP.
      unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
          nullptr, Reg, 2 * StackGrowth - FixedObject));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    } else {
      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, -MFI.getStackSize()));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    // Now emit the moves for whatever callee saved regs we have (including FP,
    // LR if those are saved).
    emitCalleeSavedFrameMoves(MBB, MBBI);
  }
}

// If return-address signing is enabled, insert the matching authentication
// (or an authenticating return on v8.3a+) before the function's return.
static void InsertReturnAddressAuth(MachineFunction &MF,
                                    MachineBasicBlock &MBB) {
  if (!ShouldSignReturnAddress(MF))
    return;
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // The AUTIASP instruction assembles to a hint instruction before v8.3a so
  // this instruction can safely be used for any v8a architecture.
  // From v8.3a onwards there are optimised authenticate LR and return
  // instructions, namely RETA{A,B}, that can be used instead.
  if (Subtarget.hasV8_3aOps() && MBBI != MBB.end() &&
      MBBI->getOpcode() == AArch64::RET_ReallyLR) {
    BuildMI(MBB, MBBI, DL,
            TII->get(ShouldSignWithAKey(MF) ? AArch64::RETAA : AArch64::RETAB))
        .copyImplicitOps(*MBBI);
    MBB.erase(MBBI);
  } else {
    BuildMI(
        MBB, MBBI, DL,
        TII->get(ShouldSignWithAKey(MF) ? AArch64::AUTIASP : AArch64::AUTIBSP))
        .setMIFlag(MachineInstr::FrameDestroy);
  }
}

// Returns true if MI is one of the pseudo-instructions that terminate an EH
// funclet rather than a regular function return.
static bool isFuncletReturnInstr(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::CATCHRET:
  case AArch64::CLEANUPRET:
    return true;
  }
}

void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL;
  bool IsTailCallReturn = false;
  bool NeedsWinCFI = needsWinCFI(MF);
  bool IsFunclet = false;

  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    unsigned RetOpcode = MBBI->getOpcode();
    IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
                       RetOpcode == AArch64::TCRETURNri ||
                       RetOpcode == AArch64::TCRETURNriBTI;
    IsFunclet = isFuncletReturnInstr(*MBBI);
  }

  int NumBytes = IsFunclet ? (int)getWinEHFuncletFrameSize(MF)
                           : MFI.getStackSize();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Initial and residual are named for consistency with the prologue. Note that
  // in the epilogue, the residual adjustment is executed first.
  uint64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    MachineOperand &StackAdjust = MBBI->getOperand(1);

    // For a tail-call in a callee-pops-arguments environment, some or all of
    // the stack may actually be in use for the call's arguments, this is
    // calculated during LowerCall and consumed here...
    ArgumentPopSize = StackAdjust.getImm();
  } else {
    // ... otherwise the amount to pop is *all* of the argument space,
    // conveniently stored in the MachineFunctionInfo by
    // LowerFormalArguments. This will, of course, be zero for the C calling
    // convention.
    ArgumentPopSize = AFI->getArgumentStackToRestore();
  }

  // The stack frame should be like below,
  //
  //      ----------------------                     ---
  //      |                    |                      |
  //      | BytesInStackArgArea|              CalleeArgStackSize
  //      | (NumReusableBytes) |                (of tail call)
  //      |                    |                     ---
  //      |                    |                      |
  //      ---------------------|        ---           |
  //      |                    |         |            |
  //      |   CalleeSavedReg   |         |            |
  //      | (CalleeSavedStackSize)|      |            |
  //      |                    |         |            |
  //      ---------------------|         |         NumBytes
  //      |                    |     StackSize  (StackAdjustUp)
  //      |   LocalStackSize   |         |            |
  //      | (covering callee   |         |            |
  //      |       args)        |         |            |
  //      |                    |         |            |
  //      ----------------------        ---          ---
  //
  // So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
  //             = StackSize + ArgumentPopSize
  //
  // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
  // it as the 2nd argument of AArch64ISD::TC_RETURN.

  // Unconditionally authenticate the return address (if signing is enabled)
  // on every exit from this function.
  auto Cleanup = make_scope_exit([&] { InsertReturnAddressAuth(MF, MBB); });

  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;

  uint64_t AfterCSRPopSize = ArgumentPopSize;
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // Var args are accounted for in the containing function, so don't
  // include them for funclets.
  if (MF.hasEHFunclets())
    AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  // Assume we can't combine the last pop with the sp restore.

  if (!CombineSPBump && PrologueSaveSize != 0) {
    MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
    while (AArch64InstrInfo::isSEHInstruction(*Pop))
      Pop = std::prev(Pop);
    // Converting the last ldp to a post-index ldp is valid only if the last
    // ldp's offset is 0.
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    // If the offset is 0, convert it to a post-index ldp.
    if (OffsetOp.getImm() == 0)
      convertCalleeSaveRestoreToSPPrePostIncDec(
          MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, false);
    else {
      // If not, make sure to emit an add after the last ldp.
      // We're doing this by transferring the size to be restored from the
      // adjustment *before* the CSR pops to the adjustment *after* the CSR
      // pops.
      AfterCSRPopSize += PrologueSaveSize;
    }
  }

  // Move past the restores of the callee-saved registers.
  // If we plan on combining the sp bump of the local stack size and the callee
  // save stack size, we might need to adjust the CSR save and restore offsets.
  MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastPopI != Begin) {
    --LastPopI;
    if (!LastPopI->getFlag(MachineInstr::FrameDestroy)) {
      ++LastPopI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize(),
                                        NeedsWinCFI);
  }

  if (NeedsWinCFI)
    BuildMI(MBB, LastPopI, DL, TII->get(AArch64::SEH_EpilogStart))
        .setMIFlag(MachineInstr::FrameDestroy);

  // If there is a single SP update, insert it before the ret and we're done.
  if (CombineSPBump) {
    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    NumBytes + AfterCSRPopSize, TII, MachineInstr::FrameDestroy,
                    false, NeedsWinCFI);
    if (NeedsWinCFI)
      BuildMI(MBB, MBB.getFirstTerminator(), DL,
              TII->get(AArch64::SEH_EpilogEnd))
          .setMIFlag(MachineInstr::FrameDestroy);
    return;
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  if (!hasFP(MF)) {
    bool RedZone = canUseRedZone(MF);
    // If this was a redzone leaf function, we don't need to restore the
    // stack pointer (but we may need to pop stack args for fastcc).
    if (RedZone && AfterCSRPopSize == 0)
      return;

    bool NoCalleeSaveRestore = PrologueSaveSize == 0;
    int StackRestoreBytes = RedZone ? 0 : NumBytes;
    if (NoCalleeSaveRestore)
      StackRestoreBytes += AfterCSRPopSize;

    // If we were able to combine the local stack pop with the argument pop,
    // then we're done.
    bool Done = NoCalleeSaveRestore || AfterCSRPopSize == 0;

    // If we're done after this, make sure to help the load store optimizer.
    if (Done)
      adaptForLdStOpt(MBB, MBB.getFirstTerminator(), LastPopI);

    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    StackRestoreBytes, TII, MachineInstr::FrameDestroy, false,
                    NeedsWinCFI);
    if (Done) {
      if (NeedsWinCFI)
        BuildMI(MBB, MBB.getFirstTerminator(), DL,
                TII->get(AArch64::SEH_EpilogEnd))
            .setMIFlag(MachineInstr::FrameDestroy);
      return;
    }

    NumBytes = 0;
  }

  // Restore the original stack pointer.
  // FIXME: Rather than doing the math here, we should instead just use
  // non-post-indexed loads for the restores if we aren't actually going to
  // be able to save any instructions.
  if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned()))
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
                    -AFI->getCalleeSavedStackSize() + 16, TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI);
  else if (NumBytes)
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes, TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI);

  // This must be placed after the callee-save restore code because that code
  // assumes the SP is at the same location as it was after the callee-save save
  // code in the prologue.
  if (AfterCSRPopSize) {
    // Find an insertion point for the first ldp so that it goes before the
    // shadow call stack epilog instruction. This ensures that the restore of
    // lr from x18 is placed after the restore from sp.
    auto FirstSPPopI = MBB.getFirstTerminator();
    while (FirstSPPopI != Begin) {
      auto Prev = std::prev(FirstSPPopI);
      if (Prev->getOpcode() != AArch64::LDRXpre ||
          Prev->getOperand(0).getReg() == AArch64::SP)
        break;
      FirstSPPopI = Prev;
    }

    adaptForLdStOpt(MBB, FirstSPPopI, LastPopI);

    emitFrameOffset(MBB, FirstSPPopI, DL, AArch64::SP, AArch64::SP,
                    AfterCSRPopSize, TII, MachineInstr::FrameDestroy, false,
                    NeedsWinCFI);
  }
  if (NeedsWinCFI)
    BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::SEH_EpilogEnd))
        .setMIFlag(MachineInstr::FrameDestroy);
}

/// getFrameIndexReference - Provide a base+offset reference to an FI slot for
/// debug info.  It's the same as what we use for resolving the code-gen
/// references for now.  FIXME: This can go wrong when references are
/// SP-relative and simple call frames aren't used.
int AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                 int FI,
                                                 unsigned &FrameReg) const {
  return resolveFrameIndexReference(MF, FI, FrameReg);
}

int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
                                                     int FI, unsigned &FrameReg,
                                                     bool PreferFP) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
  // Candidate offsets from the FP and from the SP, respectively.
  int FPOffset = MFI.getObjectOffset(FI) + FixedObject + 16;
  int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize();
  bool isFixed = MFI.isFixedObjectIndex(FI);
  // Object lives in the callee-saved register area if its (negative) offset
  // falls within the CSR save size.
  bool isCSR = !isFixed && MFI.getObjectOffset(FI) >=
                               -((int)AFI->getCalleeSavedStackSize());

  // Use frame pointer to reference fixed objects. Use it for locals if
  // there are VLAs or a dynamically realigned SP (and thus the SP isn't
  // reliable as a base). Make sure useFPForScavengingIndex() does the
  // right thing for the emergency spill slot.
  bool UseFP = false;
  if (AFI->hasStackFrame()) {
    // Note: Keeping the following as multiple 'if' statements rather than
    // merging to a single expression for readability.
    //
    // Argument access should always use the FP.
    if (isFixed) {
      UseFP = hasFP(MF);
    } else if (isCSR && RegInfo->needsStackRealignment(MF)) {
      // References to the CSR area must use FP if we're re-aligning the stack
      // since the dynamically-sized alignment padding is between the SP/BP and
      // the CSR area.
      assert(hasFP(MF) && "Re-aligned stack must have frame pointer");
      UseFP = true;
    } else if (hasFP(MF) && !RegInfo->needsStackRealignment(MF)) {
      // If the FPOffset is negative, we have to keep in mind that the
      // available offset range for negative offsets is smaller than for
      // positive ones. If an offset is
      // available via the FP and the SP, use whichever is closest.
      bool FPOffsetFits = FPOffset >= -256;
      PreferFP |= Offset > -FPOffset;

      if (MFI.hasVarSizedObjects()) {
        // If we have variable sized objects, we can use either FP or BP, as the
        // SP offset is unknown. We can use the base pointer if we have one and
        // FP is not preferred. If not, we're stuck with using FP.
        bool CanUseBP = RegInfo->hasBasePointer(MF);
        if (FPOffsetFits && CanUseBP) // Both are ok. Pick the best.
          UseFP = PreferFP;
        else if (!CanUseBP) // Can't use BP. Forced to use FP.
          UseFP = true;
        // else we can use BP and FP, but the offset from FP won't fit.
        // That will make us scavenge registers which we can probably avoid by
        // using BP. If it won't fit for BP either, we'll scavenge anyway.
      } else if (FPOffset >= 0) {
        // Use SP or FP, whichever gives us the best chance of the offset
        // being in range for direct access. If the FPOffset is positive,
        // that'll always be best, as the SP will be even further away.
        UseFP = true;
      } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
        // Funclets access the locals contained in the parent's stack frame
        // via the frame pointer, so we have to use the FP in the parent
        // function.
        assert(
            Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()) &&
            "Funclets should only be present on Win64");
        UseFP = true;
      } else {
        // We have the choice between FP and (SP or BP).
        if (FPOffsetFits && PreferFP) // If FP is the best fit, use it.
          UseFP = true;
      }
    }
  }

  assert(((isFixed || isCSR) || !RegInfo->needsStackRealignment(MF) || !UseFP) &&
         "In the presence of dynamic stack pointer realignment, "
         "non-argument/CSR objects cannot be accessed through the frame pointer");

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    return FPOffset;
  }

  // Use the base pointer if we have one.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else {
    assert(!MFI.hasVarSizedObjects() &&
           "Can't use SP when we have var sized objects.");
    FrameReg = AArch64::SP;
    // If we're using the red zone for this function, the SP won't actually
    // be adjusted, so the offsets will be negative. They're also all
    // within range of the signed 9-bit immediate instructions.
    if (canUseRedZone(MF))
      Offset -= AFI->getLocalStackSize();
  }

  return Offset;
}

// Returns the kill-flag reg state to use for Reg when it is saved in the
// prologue.
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
  // Do not set a kill flag on values that are also marked as live-in. This
  // happens with the @llvm-returnaddress intrinsic and with arguments passed in
  // callee saved registers.
  // Omitting the kill flags is conservatively correct even if the live-in
  // is not used after all.
  bool IsLiveIn = MF.getRegInfo().isLiveIn(Reg);
  return getKillRegState(!IsLiveIn);
}

// Returns true if the target emits MachO compact unwind info for this
// function (which constrains how callee saves may be paired).
static bool produceCompactUnwindFrame(MachineFunction &MF) {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  AttributeList Attrs = MF.getFunction().getAttributes();
  return Subtarget.isTargetMachO() &&
         !(Subtarget.getTargetLowering()->supportSwiftError() &&
           Attrs.hasAttrSomewhere(Attribute::SwiftError));
}

static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
                                             bool NeedsWinCFI) {
  // If we are generating register pairs for a Windows function that requires
  // EH support, then pair consecutive registers only.  There are no unwind
  // opcodes for saves/restores of non-consecutive register pairs.
  // The unwind opcodes are save_regp, save_regp_x, save_fregp, save_fregp_x.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling

  // TODO: LR can be paired with any register.  We don't support this yet in
  // the MCLayer.  We need to add support for the save_lrpair unwind code.
  if (!NeedsWinCFI)
    return false;
  if (Reg2 == Reg1 + 1)
    return false;
  return true;
}

namespace {

// Describes one (possibly paired) callee-save spill/restore: the register(s),
// the frame index of the first slot, and the scaled LDP/STP offset.
struct RegPairInfo {
  unsigned Reg1 = AArch64::NoRegister;
  unsigned Reg2 = AArch64::NoRegister;
  int FrameIdx;
  int Offset;
  enum RegType { GPR, FPR64, FPR128 } Type;

  RegPairInfo() = default;

  bool isPaired() const { return Reg2 != AArch64::NoRegister; }
};

} // end anonymous namespace

static void computeCalleeSaveRegisterPairs(
    MachineFunction &MF, const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs,
    bool &NeedShadowCallStackProlog) {

  if (CSI.empty())
    return;

  bool NeedsWinCFI = needsWinCFI(MF);
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  unsigned Count = CSI.size();
  (void)CC;
  // MachO's compact unwind format relies on all registers being stored in
  // pairs.
  assert((!produceCompactUnwindFrame(MF) ||
          CC == CallingConv::PreserveMost ||
          (Count & 1) == 0) &&
         "Odd number of callee-saved regs to spill!");
  int Offset = AFI->getCalleeSavedStackSize();
  // On Linux, we will have either one or zero non-paired register.  On Windows
  // with CFI, we can have multiple unpaired registers in order to utilize the
  // available unwind codes.  This flag assures that the alignment fixup is done
  // only once, as intended.
  bool FixupDone = false;
  for (unsigned i = 0; i < Count; ++i) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    if (AArch64::GPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::GPR;
    else if (AArch64::FPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR64;
    else if (AArch64::FPR128RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR128;
    else
      llvm_unreachable("Unsupported register class.");

    // Add the next reg to the pair if it is in the same register class.
    if (i + 1 < Count) {
      unsigned NextReg = CSI[i + 1].getReg();
      switch (RPI.Type) {
      case RegPairInfo::GPR:
        if (AArch64::GPR64RegClass.contains(NextReg) &&
            !invalidateWindowsRegisterPairing(RPI.Reg1, NextReg, NeedsWinCFI))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR64:
        if (AArch64::FPR64RegClass.contains(NextReg) &&
            !invalidateWindowsRegisterPairing(RPI.Reg1, NextReg, NeedsWinCFI))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR128:
        if (AArch64::FPR128RegClass.contains(NextReg))
          RPI.Reg2 = NextReg;
        break;
      }
    }

    // If either of the registers to be saved is the lr register, it means that
    // we also need to save lr in the shadow call stack.
    if ((RPI.Reg1 == AArch64::LR || RPI.Reg2 == AArch64::LR) &&
        MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack)) {
      if (!MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(18))
        report_fatal_error("Must reserve x18 to use shadow call stack");
      NeedShadowCallStackProlog = true;
    }

    // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
    // list to come in sorted by frame index so that we can issue the store
    // pair instructions directly. Assert if we see anything otherwise.
    //
    // The order of the registers in the list is controlled by
    // getCalleeSavedRegs(), so they will always be in-order, as well.
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx())) &&
           "Out of order callee saved regs!");

    // MachO's compact unwind format relies on all registers being stored in
    // adjacent register pairs.
    assert((!produceCompactUnwindFrame(MF) ||
            CC == CallingConv::PreserveMost ||
            (RPI.isPaired() &&
             ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
              RPI.Reg1 + 1 == RPI.Reg2))) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();

    int Scale = RPI.Type == RegPairInfo::FPR128 ? 16 : 8;
    Offset -= RPI.isPaired() ? 2 * Scale : Scale;

    // Round up size of non-pair to pair size if we need to pad the
    // callee-save area to ensure 16-byte alignment.
    if (AFI->hasCalleeSaveStackFreeSpace() && !FixupDone &&
        RPI.Type != RegPairInfo::FPR128 && !RPI.isPaired()) {
      FixupDone = true;
      Offset -= 8;
      assert(Offset % 16 == 0);
      assert(MFI.getObjectAlignment(RPI.FrameIdx) <= 16);
      MFI.setObjectAlignment(RPI.FrameIdx, 16);
    }

    assert(Offset % Scale == 0);
    RPI.Offset = Offset / Scale;
    // LDP/STP immediates are 7-bit signed, scaled by the access size.
    assert((RPI.Offset >= -64 && RPI.Offset <= 63) &&
           "Offset out of bounds for LDP/STP immediate");

    RegPairs.push_back(RPI);
    if (RPI.isPaired())
      ++i;
  }
}

bool AArch64FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  bool NeedsWinCFI = needsWinCFI(MF);
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  bool NeedShadowCallStackProlog = false;
  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs,
                                 NeedShadowCallStackProlog);
  const MachineRegisterInfo &MRI =
MF.getRegInfo(); 1718 1719 if (NeedShadowCallStackProlog) { 1720 // Shadow call stack prolog: str x30, [x18], #8 1721 BuildMI(MBB, MI, DL, TII.get(AArch64::STRXpost)) 1722 .addReg(AArch64::X18, RegState::Define) 1723 .addReg(AArch64::LR) 1724 .addReg(AArch64::X18) 1725 .addImm(8) 1726 .setMIFlag(MachineInstr::FrameSetup); 1727 1728 if (NeedsWinCFI) 1729 BuildMI(MBB, MI, DL, TII.get(AArch64::SEH_Nop)) 1730 .setMIFlag(MachineInstr::FrameSetup); 1731 1732 // This instruction also makes x18 live-in to the entry block. 1733 MBB.addLiveIn(AArch64::X18); 1734 } 1735 1736 for (auto RPII = RegPairs.rbegin(), RPIE = RegPairs.rend(); RPII != RPIE; 1737 ++RPII) { 1738 RegPairInfo RPI = *RPII; 1739 unsigned Reg1 = RPI.Reg1; 1740 unsigned Reg2 = RPI.Reg2; 1741 unsigned StrOpc; 1742 1743 // Issue sequence of spills for cs regs. The first spill may be converted 1744 // to a pre-decrement store later by emitPrologue if the callee-save stack 1745 // area allocation can't be combined with the local stack area allocation. 1746 // For example: 1747 // stp x22, x21, [sp, #0] // addImm(+0) 1748 // stp x20, x19, [sp, #16] // addImm(+2) 1749 // stp fp, lr, [sp, #32] // addImm(+4) 1750 // Rationale: This sequence saves uop updates compared to a sequence of 1751 // pre-increment spills like stp xi,xj,[sp,#-16]! 1752 // Note: Similar rationale and sequence for restores in epilog. 1753 unsigned Size, Align; 1754 switch (RPI.Type) { 1755 case RegPairInfo::GPR: 1756 StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui; 1757 Size = 8; 1758 Align = 8; 1759 break; 1760 case RegPairInfo::FPR64: 1761 StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui; 1762 Size = 8; 1763 Align = 8; 1764 break; 1765 case RegPairInfo::FPR128: 1766 StrOpc = RPI.isPaired() ? 
AArch64::STPQi : AArch64::STRQui; 1767 Size = 16; 1768 Align = 16; 1769 break; 1770 } 1771 LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI); 1772 if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI); 1773 dbgs() << ") -> fi#(" << RPI.FrameIdx; 1774 if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1; 1775 dbgs() << ")\n"); 1776 1777 assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) && 1778 "Windows unwdinding requires a consecutive (FP,LR) pair"); 1779 // Windows unwind codes require consecutive registers if registers are 1780 // paired. Make the switch here, so that the code below will save (x,x+1) 1781 // and not (x+1,x). 1782 unsigned FrameIdxReg1 = RPI.FrameIdx; 1783 unsigned FrameIdxReg2 = RPI.FrameIdx + 1; 1784 if (NeedsWinCFI && RPI.isPaired()) { 1785 std::swap(Reg1, Reg2); 1786 std::swap(FrameIdxReg1, FrameIdxReg2); 1787 } 1788 MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc)); 1789 if (!MRI.isReserved(Reg1)) 1790 MBB.addLiveIn(Reg1); 1791 if (RPI.isPaired()) { 1792 if (!MRI.isReserved(Reg2)) 1793 MBB.addLiveIn(Reg2); 1794 MIB.addReg(Reg2, getPrologueDeath(MF, Reg2)); 1795 MIB.addMemOperand(MF.getMachineMemOperand( 1796 MachinePointerInfo::getFixedStack(MF, FrameIdxReg2), 1797 MachineMemOperand::MOStore, Size, Align)); 1798 } 1799 MIB.addReg(Reg1, getPrologueDeath(MF, Reg1)) 1800 .addReg(AArch64::SP) 1801 .addImm(RPI.Offset) // [sp, #offset*scale], 1802 // where factor*scale is implicit 1803 .setMIFlag(MachineInstr::FrameSetup); 1804 MIB.addMemOperand(MF.getMachineMemOperand( 1805 MachinePointerInfo::getFixedStack(MF,FrameIdxReg1), 1806 MachineMemOperand::MOStore, Size, Align)); 1807 if (NeedsWinCFI) 1808 InsertSEH(MIB, TII, MachineInstr::FrameSetup); 1809 1810 } 1811 return true; 1812 } 1813 1814 bool AArch64FrameLowering::restoreCalleeSavedRegisters( 1815 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, 1816 std::vector<CalleeSavedInfo> &CSI, 1817 const TargetRegisterInfo *TRI) const { 1818 
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;
  bool NeedsWinCFI = needsWinCFI(MF);

  // Borrow the debug location of the instruction at the insertion point,
  // if there is one.
  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  // Recompute the same pair/offset assignment the prologue used so that the
  // loads mirror the stores exactly.
  bool NeedShadowCallStackProlog = false;
  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs,
                                 NeedShadowCallStackProlog);

  // Emit one LDP/LDR reload for a single RegPairInfo record.
  auto EmitMI = [&](const RegPairInfo &RPI) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;

    // Issue sequence of restores for cs regs. The last restore may be converted
    // to a post-increment load later by emitEpilogue if the callee-save stack
    // area allocation can't be combined with the local stack area allocation.
    // For example:
    //    ldp     fp, lr, [sp, #32]       // addImm(+4)
    //    ldp     x20, x19, [sp, #16]     // addImm(+2)
    //    ldp     x22, x21, [sp, #0]      // addImm(+0)
    // Note: see comment in spillCalleeSavedRegisters()
    unsigned LdrOpc;
    unsigned Size, Align;
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
      Size = 8;
      Align = 8;
      break;
    case RegPairInfo::FPR64:
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
      Size = 8;
      Align = 8;
      break;
    case RegPairInfo::FPR128:
      LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
      Size = 16;
      Align = 16;
      break;
    }
    LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");

    // Windows unwind codes require consecutive registers if registers are
    // paired. Make the switch here, so that the code below will restore
    // (x,x+1) and not (x+1,x).
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }
    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
    if (RPI.isPaired()) {
      MIB.addReg(Reg2, getDefRegState(true));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
          MachineMemOperand::MOLoad, Size, Align));
    }
    MIB.addReg(Reg1, getDefRegState(true))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*scale]
                            // where factor*scale is implicit
        .setMIFlag(MachineInstr::FrameDestroy);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
        MachineMemOperand::MOLoad, Size, Align));
    if (NeedsWinCFI)
      InsertSEH(MIB, TII, MachineInstr::FrameDestroy);
  };
  // NOTE(review): ReverseCSRRestoreSeq is presumably a cl::opt declared
  // earlier in this file (not visible here) — confirm before relying on it.
  if (ReverseCSRRestoreSeq)
    for (const RegPairInfo &RPI : reverse(RegPairs))
      EmitMI(RPI);
  else
    for (const RegPairInfo &RPI : RegPairs)
      EmitMI(RPI);

  // Undo the shadow call stack prolog emitted in spillCalleeSavedRegisters.
  if (NeedShadowCallStackProlog) {
    // Shadow call stack epilog: ldr x30, [x18, #-8]!
    BuildMI(MBB, MI, DL, TII.get(AArch64::LDRXpre))
        .addReg(AArch64::X18, RegState::Define)
        .addReg(AArch64::LR, RegState::Define)
        .addReg(AArch64::X18)
        .addImm(-8)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  return true;
}

/// Decide which callee-saved registers must actually be saved for \p MF and
/// record them in \p SavedRegs; may also reserve an emergency spill slot via
/// \p RS for large frames.
void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                BitVector &SavedRegs,
                                                RegScavenger *RS) const {
  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Start from the target-independent computation of used CSRs.
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  // Remember one CSR (and its partner) that is NOT being spilled, as a
  // candidate extra spill if we later need a scratch register.
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;

  MachineFrameInfo &MFI = MF.getFrameInfo();
  const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();

  unsigned BasePointerReg = RegInfo->hasBasePointer(MF)
                                ? RegInfo->getBaseRegister()
                                : (unsigned)AArch64::NoRegister;

  unsigned ExtraCSSpill = 0;
  // Figure out which callee-saved registers to save/restore.
  for (unsigned i = 0; CSRegs[i]; ++i) {
    const unsigned Reg = CSRegs[i];

    // Add the base pointer register to SavedRegs if it is callee-save.
    if (Reg == BasePointerReg)
      SavedRegs.set(Reg);

    bool RegUsed = SavedRegs.test(Reg);
    // Flipping the low bit of the index yields the partner register; this
    // assumes the CSR list is laid out in adjacent even/odd pairs — TODO
    // confirm against getCalleeSavedRegs().
    unsigned PairedReg = CSRegs[i ^ 1];
    if (!RegUsed) {
      if (AArch64::GPR64RegClass.contains(Reg) &&
          !RegInfo->isReservedReg(MF, Reg)) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
      continue;
    }

    // MachO's compact unwind format relies on all registers being stored in
    // pairs.
    // FIXME: the usual format is actually better if unwinding isn't needed.
    if (produceCompactUnwindFrame(MF) && PairedReg != AArch64::NoRegister &&
        !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !RegInfo->isReservedReg(MF, PairedReg))
        ExtraCSSpill = PairedReg;
    }
  }

  // Calculates the callee saved stack size.
  unsigned CSStackSize = 0;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned Reg : SavedRegs.set_bits())
    CSStackSize += TRI->getRegSizeInBits(Reg, MRI) / 8;

  // Save number of saved regs, so we can easily update CSStackSize later.
  unsigned NumSavedRegs = SavedRegs.count();

  // The frame record needs to be created by saving the appropriate registers
  // The extra 16 bytes presumably account for the FP/LR record forced in just
  // below — confirm.
  unsigned EstimatedStackSize = MFI.estimateStackSize(MF);
  if (hasFP(MF) ||
      windowsRequiresStackProbe(MF, EstimatedStackSize + CSStackSize + 16)) {
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);
  }

  LLVM_DEBUG(dbgs() << "*** determineCalleeSaves\nUsed CSRs:";
             for (unsigned Reg
                  : SavedRegs.set_bits()) dbgs()
             << ' ' << printReg(Reg, RegInfo);
             dbgs() << "\n";);

  // If any callee-saved registers are used, the frame cannot be eliminated.
  bool CanEliminateFrame = SavedRegs.count() == 0;

  // The CSR spill slots have not been allocated yet, so estimateStackSize
  // won't include them.
  unsigned EstimatedStackSizeLimit = estimateRSStackSizeLimit(MF);
  bool BigStack = (EstimatedStackSize + CSStackSize) > EstimatedStackSizeLimit;
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);

  // Estimate if we might need to scavenge a register at some point in order
  // to materialize a stack offset. If so, either spill one additional
  // callee-saved register or reserve a special spill slot to facilitate
  // register scavenging. If we already spilled an extra callee-saved register
  // above to keep the number of spills even, we don't need to do anything else
  // here.
2009 if (BigStack) { 2010 if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) { 2011 LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo) 2012 << " to get a scratch register.\n"); 2013 SavedRegs.set(UnspilledCSGPR); 2014 // MachO's compact unwind format relies on all registers being stored in 2015 // pairs, so if we need to spill one extra for BigStack, then we need to 2016 // store the pair. 2017 if (produceCompactUnwindFrame(MF)) 2018 SavedRegs.set(UnspilledCSGPRPaired); 2019 ExtraCSSpill = UnspilledCSGPRPaired; 2020 } 2021 2022 // If we didn't find an extra callee-saved register to spill, create 2023 // an emergency spill slot. 2024 if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) { 2025 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 2026 const TargetRegisterClass &RC = AArch64::GPR64RegClass; 2027 unsigned Size = TRI->getSpillSize(RC); 2028 unsigned Align = TRI->getSpillAlignment(RC); 2029 int FI = MFI.CreateStackObject(Size, Align, false); 2030 RS->addScavengingFrameIndex(FI); 2031 LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI 2032 << " as the emergency spill slot.\n"); 2033 } 2034 } 2035 2036 // Adding the size of additional 64bit GPR saves. 2037 CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs); 2038 unsigned AlignedCSStackSize = alignTo(CSStackSize, 16); 2039 LLVM_DEBUG(dbgs() << "Estimated stack frame size: " 2040 << EstimatedStackSize + AlignedCSStackSize 2041 << " bytes.\n"); 2042 2043 // Round up to register pair alignment to avoid additional SP adjustment 2044 // instructions. 
2045 AFI->setCalleeSavedStackSize(AlignedCSStackSize); 2046 AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize); 2047 } 2048 2049 bool AArch64FrameLowering::enableStackSlotScavenging( 2050 const MachineFunction &MF) const { 2051 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 2052 return AFI->hasCalleeSaveStackFreeSpace(); 2053 } 2054 2055 void AArch64FrameLowering::processFunctionBeforeFrameFinalized( 2056 MachineFunction &MF, RegScavenger *RS) const { 2057 // If this function isn't doing Win64-style C++ EH, we don't need to do 2058 // anything. 2059 if (!MF.hasEHFunclets()) 2060 return; 2061 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 2062 MachineFrameInfo &MFI = MF.getFrameInfo(); 2063 WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo(); 2064 2065 MachineBasicBlock &MBB = MF.front(); 2066 auto MBBI = MBB.begin(); 2067 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) 2068 ++MBBI; 2069 2070 if (MBBI->isTerminator()) 2071 return; 2072 2073 // Create an UnwindHelp object. 2074 int UnwindHelpFI = 2075 MFI.CreateStackObject(/*size*/8, /*alignment*/16, false); 2076 EHInfo.UnwindHelpFrameIdx = UnwindHelpFI; 2077 // We need to store -2 into the UnwindHelp object at the start of the 2078 // function. 2079 DebugLoc DL; 2080 RS->enterBasicBlock(MBB); 2081 unsigned DstReg = RS->scavengeRegister(&AArch64::GPR64RegClass, MBBI, 0); 2082 BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVi64imm), DstReg).addImm(-2); 2083 BuildMI(MBB, MBBI, DL, TII.get(AArch64::STURXi)) 2084 .addReg(DstReg, getKillRegState(true)) 2085 .addFrameIndex(UnwindHelpFI) 2086 .addImm(0); 2087 } 2088 2089 /// For Win64 AArch64 EH, the offset to the Unwind object is from the SP before 2090 /// the update. This is easily retrieved as it is exactly the offset that is set 2091 /// in processFunctionBeforeFrameFinalized. 
2092 int AArch64FrameLowering::getFrameIndexReferencePreferSP( 2093 const MachineFunction &MF, int FI, unsigned &FrameReg, 2094 bool IgnoreSPUpdates) const { 2095 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2096 LLVM_DEBUG(dbgs() << "Offset from the SP for " << FI << " is " 2097 << MFI.getObjectOffset(FI) << "\n"); 2098 FrameReg = AArch64::SP; 2099 return MFI.getObjectOffset(FI); 2100 } 2101 2102 /// The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve 2103 /// the parent's frame pointer 2104 unsigned AArch64FrameLowering::getWinEHParentFrameOffset( 2105 const MachineFunction &MF) const { 2106 return 0; 2107 } 2108 2109 /// Funclets only need to account for space for the callee saved registers, 2110 /// as the locals are accounted for in the parent's stack frame. 2111 unsigned AArch64FrameLowering::getWinEHFuncletFrameSize( 2112 const MachineFunction &MF) const { 2113 // This is the size of the pushed CSRs. 2114 unsigned CSSize = 2115 MF.getInfo<AArch64FunctionInfo>()->getCalleeSavedStackSize(); 2116 // This is the amount of stack a funclet needs to allocate. 2117 return alignTo(CSSize + MF.getFrameInfo().getMaxCallFrameSize(), 2118 getStackAlignment()); 2119 } 2120