//===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-====//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetFrameLowering
// class.
//
// On AArch64, stack frames are structured as follows:
//
// The stack grows downward.
//
// All of the individual frame areas on the frame below are optional, i.e. it's
// possible to create a function so that the particular area isn't present
// in the frame.
//
// At function entry, the "frame" looks as follows:
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// After the prologue has run, the frame has the following general structure.
// Note that this doesn't depict the case where a red-zone is used. Also,
// technically the last frame area (VLAs) doesn't get created until the
// main function body, after the prologue is run. However, it's depicted here
// for completeness.
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | (Win64 only) varargs from reg     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | prev_fp, prev_lr                  |
// | (a.k.a. "frame record")           |
// |-----------------------------------| <- fp(=x29)
// |                                   |
// | other callee-saved registers      |
// |                                   |
// |-----------------------------------|
// |.empty.space.to.make.part.below....|
// |.aligned.in.case.it.needs.more.than| (size of this area is unknown at
// |.the.standard.16-byte.alignment....|  compile time; if present)
// |-----------------------------------|
// |                                   |
// | local variables of fixed size     |
// | including spill slots             |
// |-----------------------------------| <- bp(not defined by ABI,
// |.variable-sized.local.variables....|       LLVM chooses X19)
// |.(VLAs)............................| (size of this area is unknown at
// |...................................|  compile time)
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// To access the data in a frame, a constant offset from one of the pointers
// (fp, bp, sp) must be computable at compile time. The size of the areas
// with a dotted background cannot be computed at compile time if they are
// present, so all three of fp, bp and sp must be set up in order to access
// all contents in the frame areas, assuming all of the frame areas are
// non-empty.
//
// For most functions, some of the frame areas are empty. For those functions,
// it may not be necessary to set up fp or bp:
// * A base pointer is definitely needed when there are both VLAs and local
//   variables with more-than-default alignment requirements.
// * A frame pointer is definitely needed when there are local variables with
//   more-than-default alignment requirements.
//
// In some cases when a base pointer is not strictly needed, it is generated
// anyway when offsets from the frame pointer to access local variables become
// so large that the offset can't be encoded in the immediate fields of loads
// or stores.
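// For example, a local at an fp-relative offset beyond the reach of a
// load/store immediate (the unscaled forms reach only +/-256 bytes) would
// otherwise need its address materialized in a scratch register on every
// access.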
//
// FIXME: also explain the redzone concept.
// FIXME: also explain the concept of reserved call frames.
//
//===----------------------------------------------------------------------===//

#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

static cl::opt<bool>
    ReverseCSRRestoreSeq("reverse-csr-restore-seq",
                         cl::desc("reverse the CSR restore sequence"),
                         cl::init(false), cl::Hidden);

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
/// This is the biggest offset to the stack pointer we can encode in AArch64
/// instructions (without using a separate calculation and a temp register).
/// Note that the exceptions here are vector stores/loads, which cannot encode
/// any displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()).
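/// The value 255 is the largest positive offset of the signed 9-bit immediate
/// used by the unscaled load/store forms (LDUR/STUR), whose range is
/// [-256, 255].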
static const unsigned DefaultSafeSPDisplacement = 255;

/// Look at each instruction that references stack frames and return the stack
/// size limit beyond which some of these instructions will require a scratch
/// register during their expansion later.
static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
  // FIXME: For now, just conservatively guesstimate based on unscaled indexing
  // range. We'll end up allocating an unnecessary spill slot a lot, but
  // realistically that's not a big deal at this stage of the game.
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.isDebugValue() || MI.isPseudo() ||
          MI.getOpcode() == AArch64::ADDXri ||
          MI.getOpcode() == AArch64::ADDSXri)
        continue;

      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isFI())
          continue;

        int Offset = 0;
        if (isAArch64FrameOffsetLegal(MI, Offset, nullptr, nullptr, nullptr) ==
            AArch64FrameOffsetCannotUpdate)
          return 0;
      }
    }
  }
  return DefaultSafeSPDisplacement;
}

bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
  if (!EnableRedZone)
    return false;
  // Don't use the red zone if the function explicitly asks us not to.
  // This is typically used for kernel code.
  if (MF.getFunction().hasFnAttribute(Attribute::NoRedZone))
    return false;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  unsigned NumBytes = AFI->getLocalStackSize();

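  // The red zone is the 128 bytes immediately below SP that leaf code may use
  // without adjusting SP (provided by e.g. the Darwin AArch64 ABI), hence the
  // 128-byte cap below.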
  return !(MFI.hasCalls() || hasFP(MF) || NumBytes > 128);
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register.
bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  // Retain behavior of always omitting the FP for leaf functions when
  // possible.
  if (MFI.hasCalls() && MF.getTarget().Options.DisableFramePointerElim(MF))
    return true;
  if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
      MFI.hasStackMap() || MFI.hasPatchPoint() ||
      RegInfo->needsStackRealignment(MF))
    return true;
  // With large call frames around we may need to use FP to access the
  // scavenging emergency spill slot.
  //
  // Unfortunately some calls to hasFP() like machine verifier ->
  // getReservedReg() -> hasFP in the middle of global isel are too early
  // to know the max call frame size. Hopefully conservatively returning "true"
  // in those cases is fine.
  // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs.
  if (!MFI.isMaxCallFrameSizeComputed() ||
      MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement)
    return true;

  return false;
}

/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
/// not required, we reserve argument space for call sites in the function
/// immediately on entry to the current function.  This eliminates the need for
/// add/sub sp brackets around call sites.  Returns true if the call frame is
/// included as part of the stack frame.
bool
AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects();
}

MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const AArch64InstrInfo *TII =
      static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
  DebugLoc DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    int64_t Amount = I->getOperand(0).getImm();
    Amount = alignTo(Amount, Align);
    if (!IsDestroy)
      Amount = -Amount;

    // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it
    // doesn't have to pop anything), then the first operand will be zero too so
    // this adjustment is a no-op.
    if (CalleePopAmount == 0) {
      // FIXME: in-function stack adjustment for calls is limited to 24-bits
      // because there's no guaranteed temporary register available.
      //
      // ADD/SUB (immediate) has only LSL #0 and LSL #12 available.
      // 1) For offsets that fit in 12 bits, we use a single instruction with
      //    LSL #0.
      // 2) For offsets of 13 to 24 bits, we use two instructions: one with
      //    LSL #0 and the other with LSL #12.
      //
      // Most call frames will be allocated at the start of a function so
      // this is OK, but it is a limitation that needs dealing with.
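      // For example, emitFrameOffset would lower a hypothetical 0x12345-byte
      // adjustment as two instructions:
      //   add sp, sp, #0x12, lsl #12   // 0x12000 bytes
      //   add sp, sp, #0x345           // remaining 0x345 bytes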
      assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
      emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, Amount, TII);
    }
  } else if (CalleePopAmount != 0) {
    // If the calling convention demands that the callee pops arguments from the
    // stack, we want to add it back if we have a reserved call frame.
    assert(CalleePopAmount < 0xffffff && "call frame too large");
    emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, -CalleePopAmount,
                    TII);
  }
  return MBB.erase(I);
}

void AArch64FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const MCRegisterInfo *MRI = STI.getRegisterInfo();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();
    int64_t Offset =
        MFI.getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea();
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer.  We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack pointer,
// but we would then have to make sure that we were in fact saving at least one
// callee-save register in the prologue, which is additional complexity that
// doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
  MachineFunction *MF = MBB->getParent();

  // If MBB is the entry block, use X9 as the scratch register.
  if (&MF->front() == MBB)
    return AArch64::X9;

  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo();
  LivePhysRegs LiveRegs(TRI);
  LiveRegs.addLiveIns(*MBB);

  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  // Prefer X9 since it was historically used for the prologue scratch reg.
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  if (LiveRegs.available(MRI, AArch64::X9))
    return AArch64::X9;

  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }
  return AArch64::NoRegister;
}

bool AArch64FrameLowering::canUseAsPrologue(
    const MachineBasicBlock &MBB) const {
  const MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  // Don't need a scratch register if we're not going to re-align the stack.
  if (!RegInfo->needsStackRealignment(*MF))
    return true;
  // Otherwise, we can use any block as long as it has a scratch register
  // available.
  return findScratchNonCalleeSaveRegister(TmpMBB) != AArch64::NoRegister;
}

static bool windowsRequiresStackProbe(MachineFunction &MF,
                                      unsigned StackSizeInBytes) {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  if (!Subtarget.isTargetWindows())
    return false;
  const Function &F = MF.getFunction();
  // TODO: When implementing stack protectors, take that into account
  // for the probe threshold.
  unsigned StackProbeSize = 4096;
  if (F.hasFnAttribute("stack-probe-size"))
    F.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  return (StackSizeInBytes >= StackProbeSize) &&
         !F.hasFnAttribute("no-stack-arg-probe");
}

bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
    MachineFunction &MF, unsigned StackBumpBytes) const {
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  if (AFI->getLocalStackSize() == 0)
    return false;

  // 512 is the maximum immediate for stp/ldp that will be used for
  // callee-save save/restores.
  if (StackBumpBytes >= 512 || windowsRequiresStackProbe(MF, StackBumpBytes))
    return false;

  if (MFI.hasVarSizedObjects())
    return false;

  if (RegInfo->needsStackRealignment(MF))
    return false;

  // This isn't strictly necessary, but it simplifies things a bit since the
  // current RedZone handling code assumes the SP is adjusted by the
  // callee-save save/restore code.
  if (canUseRedZone(MF))
    return false;

  return true;
}

// Convert callee-save register save/restore instructions to do stack pointer
// decrement/increment to allocate/deallocate the callee-save stack area, by
// converting the store/load to use the pre/post increment version.
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc) {
  // Ignore instructions that do not operate on SP, i.e. shadow call stack
  // instructions.
  while (MBBI->getOpcode() == AArch64::STRXpost ||
         MBBI->getOpcode() == AArch64::LDRXpre) {
    assert(MBBI->getOperand(0).getReg() != AArch64::SP);
    ++MBBI;
  }

  unsigned NewOpc;
  bool NewIsUnscaled = false;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    NewIsUnscaled = true;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    NewIsUnscaled = true;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    NewIsUnscaled = true;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    NewIsUnscaled = true;
    break;
  }

  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  MIB.addReg(AArch64::SP, RegState::Define);

  // Copy all operands other than the immediate offset.
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is immediate offset that needs fixing.
  assert(CSStackSizeInc % 8 == 0);
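  // E.g. a CSStackSizeInc of -16 becomes a pair-scaled immediate of -2 for
  // the STP/LDP pre/post-index forms, while the pre/post-index STR/LDR
  // immediates stay in bytes.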
  int64_t CSStackSizeIncImm = CSStackSizeInc;
  if (!NewIsUnscaled)
    CSStackSizeIncImm /= 8;
  MIB.addImm(CSStackSizeIncImm);

  MIB.setMIFlags(MBBI->getFlags());
  MIB.setMemRefs(MBBI->memoperands_begin(), MBBI->memoperands_end());

  return std::prev(MBB.erase(MBBI));
}

// Fixup callee-save register save/restore instructions to take into account
// combined SP bump by adding the local stack size to the stack offsets.
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
                                              unsigned LocalStackSize) {
  unsigned Opc = MI.getOpcode();

  // Ignore instructions that do not operate on SP, i.e. shadow call stack
  // instructions.
  if (Opc == AArch64::STRXpost || Opc == AArch64::LDRXpre) {
    assert(MI.getOperand(0).getReg() != AArch64::SP);
    return;
  }

  (void)Opc;
  assert((Opc == AArch64::STPXi || Opc == AArch64::STPDi ||
          Opc == AArch64::STRXui || Opc == AArch64::STRDui ||
          Opc == AArch64::LDPXi || Opc == AArch64::LDPDi ||
          Opc == AArch64::LDRXui || Opc == AArch64::LDRDui) &&
         "Unexpected callee-save save/restore opcode!");

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes have scaled offsets.
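  // E.g. with LocalStackSize == 48, 'stp x20, x19, [sp]' (scaled offset 0)
  // becomes 'stp x20, x19, [sp, #48]' (scaled offset 6).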
  assert(LocalStackSize % 8 == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / 8);
}

void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &F = MF.getFunction();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() || F.needsUnwindTableEntry();
  bool HasFP = hasFP(MF);

  // At this point, we're going to decide whether or not the function uses a
  // redzone. In most cases, the function doesn't have a redzone so let's
  // assume that's false and set it to true in the case that there's a redzone.
  AFI->setHasRedZone(false);

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  int NumBytes = (int)MFI.getStackSize();
  if (!AFI->hasStackFrame() && !windowsRequiresStackProbe(MF, NumBytes)) {
    assert(!HasFP && "unexpected function without stack frame but with FP");

    // All of the stack allocation is for locals.
    AFI->setLocalStackSize(NumBytes);

    if (!NumBytes)
      return;
    // REDZONE: If the stack size is less than 128 bytes, we don't need
    // to actually allocate.
    if (canUseRedZone(MF)) {
      AFI->setHasRedZone(true);
      ++NumRedZoneFunctions;
    } else {
      emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
                      MachineInstr::FrameSetup);

      // Label used to tie together the PROLOG_LABEL and the MachineMoves.
      MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }
    return;
  }

  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;

  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // All of the remaining stack allocations are for locals.
  AFI->setLocalStackSize(NumBytes - PrologueSaveSize);

  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  if (CombineSPBump) {
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
                    MachineInstr::FrameSetup);
    NumBytes = 0;
  } else if (PrologueSaveSize != 0) {
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(MBB, MBBI, DL, TII,
                                                     -PrologueSaveSize);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Move past the saves of the callee-saved registers, fixing up the offsets
  // and pre-inc if we decided to combine the callee-save and local stack
  // pointer bump above.
  MachineBasicBlock::iterator End = MBB.end();
  while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup)) {
    if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize());
    ++MBBI;
  }
  if (HasFP) {
    // Only set up FP if we actually need to. The frame pointer is
    // fp = sp - FixedObject - 16, relative to the SP at function entry.
    int FPOffset = AFI->getCalleeSavedStackSize() - 16;
    if (CombineSPBump)
      FPOffset += AFI->getLocalStackSize();

    // Issue    add fp, sp, FPOffset or
    //          mov fp, sp         when FPOffset is zero.
    // Note: All stores of callee-saved registers are marked as "FrameSetup".
    // This code marks the instruction(s) that set the FP also.
    emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, FPOffset, TII,
                    MachineInstr::FrameSetup);
  }

  if (windowsRequiresStackProbe(MF, NumBytes)) {
    uint32_t NumWords = NumBytes >> 4;
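    // __chkstk on Windows expects the number of 16-byte units to probe in
    // x15, hence the divide-by-16 above.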

    BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), AArch64::X15)
        .addImm(NumWords)
        .setMIFlags(MachineInstr::FrameSetup);

    switch (MF.getTarget().getCodeModel()) {
    case CodeModel::Small:
    case CodeModel::Medium:
    case CodeModel::Kernel:
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL))
          .addExternalSymbol("__chkstk")
          .addReg(AArch64::X15, RegState::Implicit)
          .setMIFlags(MachineInstr::FrameSetup);
      break;
    case CodeModel::Large:
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVaddrEXT))
          .addReg(AArch64::X16, RegState::Define)
          .addExternalSymbol("__chkstk")
          .addExternalSymbol("__chkstk")
          .setMIFlags(MachineInstr::FrameSetup);

      BuildMI(MBB, MBBI, DL, TII->get(AArch64::BLR))
          .addReg(AArch64::X16, RegState::Kill)
          .addReg(AArch64::X15, RegState::Implicit | RegState::Define)
          .setMIFlags(MachineInstr::FrameSetup);
      break;
    }

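    // Carve out the probed area: sp = sp - (x15 << 4), i.e. NumWords * 16
    // bytes.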
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBXrx64), AArch64::SP)
        .addReg(AArch64::SP, RegState::Kill)
        .addReg(AArch64::X15, RegState::Kill)
        .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 4))
        .setMIFlags(MachineInstr::FrameSetup);
    NumBytes = 0;
  }

  // Allocate space for the rest of the frame.
  if (NumBytes) {
    const bool NeedsRealignment = RegInfo->needsStackRealignment(MF);
    unsigned scratchSPReg = AArch64::SP;

    if (NeedsRealignment) {
      scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
      assert(scratchSPReg != AArch64::NoRegister);
    }

    // If we're a leaf function, try using the red zone.
    if (!canUseRedZone(MF))
      // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
      // the correct value here, as NumBytes also includes padding bytes,
      // which shouldn't be counted here.
      emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, -NumBytes, TII,
                      MachineInstr::FrameSetup);

    if (NeedsRealignment) {
      const unsigned Alignment = MFI.getMaxAlignment();
      const unsigned NrBitsToZero = countTrailingZeros(Alignment);
      assert(NrBitsToZero > 1);
      assert(scratchSPReg != AArch64::SP);

      // SUB X9, SP, NumBytes
      //   -- X9 is temporary register, so shouldn't contain any live data here,
      //   -- free to use. This is already produced by emitFrameOffset above.
      // AND SP, X9, 0b11111...0000
      // The logical immediates have a non-trivial encoding. The following
      // formula computes the encoded immediate with all ones but
      // NrBitsToZero zero bits as least significant bits.
      uint32_t andMaskEncoded = (1 << 12)                         // = N
                                | ((64 - NrBitsToZero) << 6)      // immr
                                | ((64 - NrBitsToZero - 1) << 0); // imms
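      // E.g. a 32-byte alignment (NrBitsToZero == 5) yields immr == 59 and
      // imms == 58, which decodes to the mask 0xFFFFFFFFFFFFFFE0.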

      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
          .addReg(scratchSPReg, RegState::Kill)
          .addImm(andMaskEncoded);
      AFI->setStackRealigned(true);
    }
  }

  // If we need a base pointer, set it up here. It's whatever the value of the
  // stack pointer is at this point. Any variable size objects will be allocated
  // after this, so we can still use the base pointer to reference locals.
  //
  // FIXME: Clarify FrameSetup flags here.
  // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
  // needed.
  if (RegInfo->hasBasePointer(MF)) {
    TII->copyPhysReg(MBB, MBBI, DL, RegInfo->getBaseRegister(), AArch64::SP,
                     false);
  }

  if (needsFrameMoves) {
    const DataLayout &TD = MF.getDataLayout();
    const int StackGrowth = -TD.getPointerSize(0);
    unsigned FramePtr = RegInfo->getFrameRegister(MF);
    // An example of the prologue:
    //
    //     .globl __foo
    //     .align 2
    //  __foo:
    // Ltmp0:
    //     .cfi_startproc
    //     .cfi_personality 155, ___gxx_personality_v0
    // Leh_func_begin:
    //     .cfi_lsda 16, Lexception33
    //
    //     stp  xa, xb, [sp, #-offset]!
    //     ...
    //     stp  x28, x27, [sp, #offset-32]
    //     stp  fp, lr, [sp, #offset-16]
    //     add  fp, sp, #offset - 16
    //     sub  sp, sp, #1360
    //
    // The Stack:
    //       +-------------------------------------------+
    // 10000 | ........ | ........ | ........ | ........ |
    // 10004 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10008 | ........ | ........ | ........ | ........ |
    // 1000c | ........ | ........ | ........ | ........ |
    //       +===========================================+
    // 10010 |                X28 Register               |
    // 10014 |                X28 Register               |
    //       +-------------------------------------------+
    // 10018 |                X27 Register               |
    // 1001c |                X27 Register               |
    //       +===========================================+
    // 10020 |                Frame Pointer              |
    // 10024 |                Frame Pointer              |
    //       +-------------------------------------------+
    // 10028 |                Link Register              |
    // 1002c |                Link Register              |
    //       +===========================================+
    // 10030 | ........ | ........ | ........ | ........ |
    // 10034 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10038 | ........ | ........ | ........ | ........ |
    // 1003c | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    //
    //     [sp] = 10030        ::    >>initial value<<
    //     sp = 10020          ::  stp fp, lr, [sp, #-16]!
    //     fp = sp == 10020    ::  mov fp, sp
    //     [sp] == 10020       ::  stp x28, x27, [sp, #-16]!
    //     sp == 10010         ::    >>final value<<
    //
    // The frame pointer (w29) points to address 10020. If we use an offset of
    // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
    // for w27, and -32 for w28:
    //
    //  Ltmp1:
    //     .cfi_def_cfa w29, 16
    //  Ltmp2:
    //     .cfi_offset w30, -8
    //  Ltmp3:
    //     .cfi_offset w29, -16
    //  Ltmp4:
    //     .cfi_offset w27, -24
    //  Ltmp5:
    //     .cfi_offset w28, -32

    if (HasFP) {
      // Define the current CFA rule to use the provided FP.
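      // With fp == entry-SP - FixedObject - 16, the CFA works out to
      // fp + 16 + FixedObject (createDefCfa negates the offset it is given),
      // i.e. the SP at function entry.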
      unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
          nullptr, Reg, 2 * StackGrowth - FixedObject));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    } else {
      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, -MFI.getStackSize()));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    // Now emit the moves for whatever callee saved regs we have (including FP,
    // LR if those are saved).
    emitCalleeSavedFrameMoves(MBB, MBBI);
  }
}

void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL;
  bool IsTailCallReturn = false;
  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    unsigned RetOpcode = MBBI->getOpcode();
    IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
      RetOpcode == AArch64::TCRETURNri;
  }
  int NumBytes = MFI.getStackSize();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Initial and residual are named for consistency with the prologue. Note that
  // in the epilogue, the residual adjustment is executed first.
  uint64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    MachineOperand &StackAdjust = MBBI->getOperand(1);

    // For a tail-call in a callee-pops-arguments environment, some or all of
    // the stack may actually be in use for the call's arguments; this is
    // calculated during LowerCall and consumed here...
    ArgumentPopSize = StackAdjust.getImm();
  } else {
    // ... otherwise the amount to pop is *all* of the argument space,
    // conveniently stored in the MachineFunctionInfo by
    // LowerFormalArguments. This will, of course, be zero for the C calling
    // convention.
    ArgumentPopSize = AFI->getArgumentStackToRestore();
  }

  // The stack frame should be like below,
  //
  //      ----------------------                     ---
  //      |                    |                      |
  //      | BytesInStackArgArea|              CalleeArgStackSize
  //      | (NumReusableBytes) |                (of tail call)
  //      |                    |                     ---
  //      |                    |                      |
  //      ---------------------|        ---           |
  //      |                    |         |            |
  //      |   CalleeSavedReg   |         |            |
  //      | (CalleeSavedStackSize)|      |            |
  //      |                    |         |            |
  //      ---------------------|         |         NumBytes
  //      |                    |     StackSize  (StackAdjustUp)
  //      |   LocalStackSize   |         |            |
  //      | (covering callee   |         |            |
  //      |       args)        |         |            |
  //      |                    |         |            |
  //      ----------------------        ---          ---
  //
  // So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
  //             = StackSize + ArgumentPopSize
  //
  // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
  // it as the 2nd argument of AArch64ISD::TC_RETURN.

  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;

  uint64_t AfterCSRPopSize = ArgumentPopSize;
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  // Assume we can't combine the last pop with the sp restore.

  if (!CombineSPBump && PrologueSaveSize != 0) {
    MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
    // Converting the last ldp to a post-index ldp is valid only if the last
    // ldp's offset is 0.
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    // If the offset is 0, convert it to a post-index ldp.
    if (OffsetOp.getImm() == 0) {
      convertCalleeSaveRestoreToSPPrePostIncDec(MBB, Pop, DL, TII,
                                                PrologueSaveSize);
    } else {
      // If not, make sure to emit an add after the last ldp.
      // We're doing this by transferring the size to be restored from the
      // adjustment *before* the CSR pops to the adjustment *after* the CSR
      // pops.
      AfterCSRPopSize += PrologueSaveSize;
    }
  }

  // Move past the restores of the callee-saved registers.
  // If we plan on combining the sp bump of the local stack size and the callee
  // save stack size, we might need to adjust the CSR save and restore offsets.
  MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastPopI != Begin) {
    --LastPopI;
    if (!LastPopI->getFlag(MachineInstr::FrameDestroy)) {
      ++LastPopI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize());
  }

  // If there is a single SP update, insert it before the ret and we're done.
  if (CombineSPBump) {
    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    NumBytes + AfterCSRPopSize, TII,
                    MachineInstr::FrameDestroy);
    return;
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  if (!hasFP(MF)) {
    bool RedZone = canUseRedZone(MF);
    // If this was a redzone leaf function, we don't need to restore the
    // stack pointer (but we may need to pop stack args for fastcc).
    if (RedZone && AfterCSRPopSize == 0)
      return;

    bool NoCalleeSaveRestore = PrologueSaveSize == 0;
    int StackRestoreBytes = RedZone ? 0 : NumBytes;
    if (NoCalleeSaveRestore)
      StackRestoreBytes += AfterCSRPopSize;
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    StackRestoreBytes, TII, MachineInstr::FrameDestroy);
    // If we were able to combine the local stack pop with the argument pop,
    // then we're done.
    if (NoCalleeSaveRestore || AfterCSRPopSize == 0)
      return;
    NumBytes = 0;
  }

  // Restore the original stack pointer.
  // FIXME: Rather than doing the math here, we should instead just use
  // non-post-indexed loads for the restores if we aren't actually going to
  // be able to save any instructions.
  if (MFI.hasVarSizedObjects() || AFI->isStackRealigned())
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
                    -AFI->getCalleeSavedStackSize() + 16, TII,
                    MachineInstr::FrameDestroy);
  else if (NumBytes)
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes, TII,
                    MachineInstr::FrameDestroy);

  // This must be placed after the callee-save restore code because that code
  // assumes the SP is at the same location as it was after the callee-save save
  // code in the prologue.
  if (AfterCSRPopSize) {
    // Find an insertion point for the first ldp so that it goes before the
    // shadow call stack epilog instruction. This ensures that the restore of
    // lr from x18 is placed after the restore from sp.
    auto FirstSPPopI = MBB.getFirstTerminator();
    while (FirstSPPopI != Begin) {
      auto Prev = std::prev(FirstSPPopI);
      if (Prev->getOpcode() != AArch64::LDRXpre ||
          Prev->getOperand(0).getReg() == AArch64::SP)
        break;
      FirstSPPopI = Prev;
    }

    // Sometimes (when we restore in the same order as we save), we can end up
    // with code like this:
    //
    // ldp      x26, x25, [sp]
    // ldp      x24, x23, [sp, #16]
    // ldp      x22, x21, [sp, #32]
    // ldp      x20, x19, [sp, #48]
    // add      sp, sp, #64
    //
    // In this case, it is always better to put the first ldp at the end, so
    // that the load-store optimizer can run and merge the ldp and the add into
    // a post-index ldp.
    // If we managed to grab the first pop instruction, move it to the end.
    if (LastPopI != Begin)
      MBB.splice(FirstSPPopI, &MBB, LastPopI);
    // We should end up with something like this now:
    //
    // ldp      x24, x23, [sp, #16]
    // ldp      x22, x21, [sp, #32]
    // ldp      x20, x19, [sp, #48]
    // ldp      x26, x25, [sp]
    // add      sp, sp, #64
    //
    // and the load-store optimizer can merge the last two instructions into:
    //
    // ldp      x26, x25, [sp], #64
    //
    emitFrameOffset(MBB, FirstSPPopI, DL, AArch64::SP, AArch64::SP,
                    AfterCSRPopSize, TII, MachineInstr::FrameDestroy);
  }
}

/// getFrameIndexReference - Provide a base+offset reference to an FI slot for
/// debug info.  It's the same as what we use for resolving the code-gen
/// references for now.  FIXME: This can go wrong when references are
/// SP-relative and simple call frames aren't used.
int AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                 int FI,
                                                 unsigned &FrameReg) const {
  return resolveFrameIndexReference(MF, FI, FrameReg);
}

int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
                                                     int FI, unsigned &FrameReg,
                                                     bool PreferFP) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
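  // MFI object offsets are relative to the SP at function entry, while fp
  // sits FixedObject + 16 bytes below it, so FPOffset rebases the object
  // offset to be fp-relative.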
  int FPOffset = MFI.getObjectOffset(FI) + FixedObject + 16;
  int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize();
  bool isFixed = MFI.isFixedObjectIndex(FI);

  // Use frame pointer to reference fixed objects. Use it for locals if
  // there are VLAs or a dynamically realigned SP (and thus the SP isn't
  // reliable as a base). Make sure useFPForScavengingIndex() does the
  // right thing for the emergency spill slot.
  bool UseFP = false;
  if (AFI->hasStackFrame()) {
    // Note: Keeping the following as multiple 'if' statements rather than
    // merging to a single expression for readability.
    //
    // Argument access should always use the FP.
    if (isFixed) {
      UseFP = hasFP(MF);
    } else if (hasFP(MF) && !RegInfo->needsStackRealignment(MF)) {
      // If the FPOffset is negative, we have to keep in mind that the
      // available offset range for negative offsets is smaller than for
      // positive ones. If an offset is available via the FP and the SP,
      // use whichever is closest.
      bool FPOffsetFits = FPOffset >= -256;
      PreferFP |= Offset > -FPOffset;

      if (MFI.hasVarSizedObjects()) {
        // If we have variable sized objects, we can use either FP or BP, as the
        // SP offset is unknown. We can use the base pointer if we have one and
        // FP is not preferred. If not, we're stuck with using FP.
        bool CanUseBP = RegInfo->hasBasePointer(MF);
        if (FPOffsetFits && CanUseBP) // Both are ok. Pick the best.
          UseFP = PreferFP;
        else if (!CanUseBP) // Can't use BP. Forced to use FP.
          UseFP = true;
        // else we can use BP and FP, but the offset from FP won't fit.
        // That will make us scavenge registers which we can probably avoid by
        // using BP. If it won't fit for BP either, we'll scavenge anyway.
      } else if (FPOffset >= 0) {
        // Use SP or FP, whichever gives us the best chance of the offset
        // being in range for direct access. If the FPOffset is positive,
        // that'll always be best, as the SP will be even further away.
        UseFP = true;
      } else {
        // We have the choice between FP and (SP or BP).
        if (FPOffsetFits && PreferFP) // If FP is the best fit, use it.
          UseFP = true;
      }
    }
  }

  assert((isFixed || !RegInfo->needsStackRealignment(MF) || !UseFP) &&
         "In the presence of dynamic stack pointer realignment, "
         "non-argument objects cannot be accessed through the frame pointer");

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    return FPOffset;
  }

  // Use the base pointer if we have one.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else {
    assert(!MFI.hasVarSizedObjects() &&
           "Can't use SP when we have var sized objects.");
    FrameReg = AArch64::SP;
    // If we're using the red zone for this function, the SP won't actually
    // be adjusted, so the offsets will be negative. They're also all
    // within range of the signed 9-bit immediate instructions.
    if (canUseRedZone(MF))
      Offset -= AFI->getLocalStackSize();
  }

  return Offset;
}

static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
  // Do not set a kill flag on values that are also marked as live-in. This
  // happens with the @llvm.returnaddress intrinsic and with arguments passed in
  // callee saved registers.
  // Omitting the kill flags is conservatively correct even if the live-in
  // is not used after all.
  bool IsLiveIn = MF.getRegInfo().isLiveIn(Reg);
  return getKillRegState(!IsLiveIn);
}

static bool produceCompactUnwindFrame(MachineFunction &MF) {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  AttributeList Attrs = MF.getFunction().getAttributes();
  return Subtarget.isTargetMachO() &&
         !(Subtarget.getTargetLowering()->supportSwiftError() &&
           Attrs.hasAttrSomewhere(Attribute::SwiftError));
}

namespace {

struct RegPairInfo {
  unsigned Reg1 = AArch64::NoRegister;
  unsigned Reg2 = AArch64::NoRegister;
  int FrameIdx;
  int Offset;
  bool IsGPR;

  RegPairInfo() = default;

  bool isPaired() const { return Reg2 != AArch64::NoRegister; }
};

} // end anonymous namespace

static void computeCalleeSaveRegisterPairs(
    MachineFunction &MF, const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs,
    bool &NeedShadowCallStackProlog) {

  if (CSI.empty())
    return;

  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  unsigned Count = CSI.size();
  (void)CC;
  // MachO's compact unwind format relies on all registers being stored in
  // pairs.
  assert((!produceCompactUnwindFrame(MF) ||
          CC == CallingConv::PreserveMost ||
          (Count & 1) == 0) &&
         "Odd number of callee-saved regs to spill!");
  int Offset = AFI->getCalleeSavedStackSize();

  for (unsigned i = 0; i < Count; ++i) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    assert(AArch64::GPR64RegClass.contains(RPI.Reg1) ||
           AArch64::FPR64RegClass.contains(RPI.Reg1));
    RPI.IsGPR = AArch64::GPR64RegClass.contains(RPI.Reg1);

    // Add the next reg to the pair if it is in the same register class.
    if (i + 1 < Count) {
      unsigned NextReg = CSI[i + 1].getReg();
      if ((RPI.IsGPR && AArch64::GPR64RegClass.contains(NextReg)) ||
          (!RPI.IsGPR && AArch64::FPR64RegClass.contains(NextReg)))
        RPI.Reg2 = NextReg;
    }

    // If either of the registers to be saved is the lr register, it means that
    // we also need to save lr in the shadow call stack.
    if ((RPI.Reg1 == AArch64::LR || RPI.Reg2 == AArch64::LR) &&
        MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack)) {
      if (!MF.getSubtarget<AArch64Subtarget>().isX18Reserved())
        report_fatal_error("Must reserve x18 to use shadow call stack");
      NeedShadowCallStackProlog = true;
    }

    // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
    // list to come in sorted by frame index so that we can issue the store
    // pair instructions directly. Assert if we see anything otherwise.
    //
    // The order of the registers in the list is controlled by
    // getCalleeSavedRegs(), so they will always be in-order, as well.
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx())) &&
           "Out of order callee saved regs!");

    // MachO's compact unwind format relies on all registers being stored in
    // adjacent register pairs.
    assert((!produceCompactUnwindFrame(MF) ||
            CC == CallingConv::PreserveMost ||
            (RPI.isPaired() &&
             ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
              RPI.Reg1 + 1 == RPI.Reg2))) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();

    if (Count * 8 != AFI->getCalleeSavedStackSize() && !RPI.isPaired()) {
      // Round up size of non-pair to pair size if we need to pad the
      // callee-save area to ensure 16-byte alignment.
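      // E.g. with three 8-byte CSRs the area is padded from 24 to 32 bytes,
      // so the lone unpaired register gets a full 16-byte slot.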
1202       Offset -= 16;
1203       assert(MFI.getObjectAlignment(RPI.FrameIdx) <= 16);
1204       MFI.setObjectAlignment(RPI.FrameIdx, 16);
1205       AFI->setCalleeSaveStackHasFreeSpace(true);
1206     } else
1207       Offset -= RPI.isPaired() ? 16 : 8;
1208     assert(Offset % 8 == 0);
1209     RPI.Offset = Offset / 8;
1210     assert((RPI.Offset >= -64 && RPI.Offset <= 63) &&
1211            "Offset out of bounds for LDP/STP immediate");
1212 
1213     RegPairs.push_back(RPI);
1214     if (RPI.isPaired())
1215       ++i;
1216   }
1217 }
1218 
1219 bool AArch64FrameLowering::spillCalleeSavedRegisters(
1220     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
1221     const std::vector<CalleeSavedInfo> &CSI,
1222     const TargetRegisterInfo *TRI) const {
1223   MachineFunction &MF = *MBB.getParent();
1224   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1225   DebugLoc DL;
1226   SmallVector<RegPairInfo, 8> RegPairs;
1227 
1228   bool NeedShadowCallStackProlog = false;
1229   computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs,
1230                                  NeedShadowCallStackProlog);
1231   const MachineRegisterInfo &MRI = MF.getRegInfo();
1232 
1233   if (NeedShadowCallStackProlog) {
1234     // Shadow call stack prolog: str x30, [x18], #8
1235     BuildMI(MBB, MI, DL, TII.get(AArch64::STRXpost))
1236         .addReg(AArch64::X18, RegState::Define)
1237         .addReg(AArch64::LR)
1238         .addReg(AArch64::X18)
1239         .addImm(8)
1240         .setMIFlag(MachineInstr::FrameSetup);
1241 
1242     // This instruction also makes x18 live-in to the entry block.
1243     MBB.addLiveIn(AArch64::X18);
1244   }

  for (auto RPII = RegPairs.rbegin(), RPIE = RegPairs.rend(); RPII != RPIE;
       ++RPII) {
    RegPairInfo RPI = *RPII;
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned StrOpc;

    // Issue the sequence of spills for the callee-saved registers. The first
    // spill may be converted to a pre-decrement store later by emitPrologue
    // if the callee-save stack area allocation can't be combined with the
    // local stack area allocation. For example:
    //    stp     x22, x21, [sp, #0]     // addImm(+0)
    //    stp     x20, x19, [sp, #16]    // addImm(+2)
    //    stp     fp, lr, [sp, #32]      // addImm(+4)
    // Rationale: this sequence avoids the stack-pointer update micro-ops that
    // a chain of pre-increment spills such as stp xi, xj, [sp, #-16]! incurs.
    // Note: a similar rationale and sequence apply to the restores in the
    // epilog.
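    // If emitPrologue does perform that conversion, the first store instead
    // allocates the whole callee-save area itself, e.g.:
    //    stp     x22, x21, [sp, #-48]!  // allocate 48 bytes and spill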
    if (RPI.IsGPR)
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
    else
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
    DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
          if (RPI.isPaired())
            dbgs() << ", " << printReg(Reg2, TRI);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx + 1;
          dbgs() << ")\n");

    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
    if (!MRI.isReserved(Reg1))
      MBB.addLiveIn(Reg1);
    if (RPI.isPaired()) {
      if (!MRI.isReserved(Reg2))
        MBB.addLiveIn(Reg2);
      MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
          MachineMemOperand::MOStore, 8, 8));
    }
    MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*8]; the factor of 8 is implicit
        .setMIFlag(MachineInstr::FrameSetup);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
        MachineMemOperand::MOStore, 8, 8));
  }
  return true;
}

bool AArch64FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  bool NeedShadowCallStackProlog = false;
  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs,
                                 NeedShadowCallStackProlog);

  auto EmitMI = [&](const RegPairInfo &RPI) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;

    // Issue the sequence of restores for the callee-saved registers. The last
    // restore may be converted to a post-increment load later by emitEpilogue
    // if the callee-save stack area allocation can't be combined with the
    // local stack area allocation. For example:
    //    ldp     fp, lr, [sp, #32]       // addImm(+4)
    //    ldp     x20, x19, [sp, #16]     // addImm(+2)
    //    ldp     x22, x21, [sp, #0]      // addImm(+0)
    // Note: see comment in spillCalleeSavedRegisters()
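    // If emitEpilogue does perform that conversion, the last load instead
    // deallocates the whole callee-save area itself, e.g.:
    //    ldp     x22, x21, [sp], #48    // reload and pop 48 bytes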
    unsigned LdrOpc;
    if (RPI.IsGPR)
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
    else
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
    DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
          if (RPI.isPaired())
            dbgs() << ", " << printReg(Reg2, TRI);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx + 1;
          dbgs() << ")\n");

    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
    if (RPI.isPaired()) {
      MIB.addReg(Reg2, getDefRegState(true));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
          MachineMemOperand::MOLoad, 8, 8));
    }
    MIB.addReg(Reg1, getDefRegState(true))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*8]; the factor of 8 is implicit
        .setMIFlag(MachineInstr::FrameDestroy);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
        MachineMemOperand::MOLoad, 8, 8));
  };

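  // Restores are normally emitted in the order computeCalleeSaveRegisterPairs
  // produced them (the spill loop walks RegPairs in reverse); the
  // ReverseCSRRestoreSeq option, defined earlier in this file, flips the
  // order for experimentation.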
  if (ReverseCSRRestoreSeq)
    for (const RegPairInfo &RPI : reverse(RegPairs))
      EmitMI(RPI);
  else
    for (const RegPairInfo &RPI : RegPairs)
      EmitMI(RPI);

  if (NeedShadowCallStackProlog) {
    // Shadow call stack epilog: ldr x30, [x18, #-8]!
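    // The pre-indexed load decrements x18 back to the slot the prolog wrote
    // and reloads the saved return address, keeping the shadow stack
    // balanced.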
    BuildMI(MBB, MI, DL, TII.get(AArch64::LDRXpre))
        .addReg(AArch64::X18, RegState::Define)
        .addReg(AArch64::LR, RegState::Define)
        .addReg(AArch64::X18)
        .addImm(-8)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  return true;
}

void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                BitVector &SavedRegs,
                                                RegScavenger *RS) const {
  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;

  MachineFrameInfo &MFI = MF.getFrameInfo();
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);

  unsigned BasePointerReg = RegInfo->hasBasePointer(MF)
                                ? RegInfo->getBaseRegister()
                                : (unsigned)AArch64::NoRegister;

  unsigned SpillEstimate = SavedRegs.count();
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
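    // CSRegs lists callee-saved registers in pair order, so XOR-ing the index
    // with 1 yields the pair buddy: even i pairs with i + 1, odd i with i - 1.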
    unsigned PairedReg = CSRegs[i ^ 1];
    if (Reg == BasePointerReg)
      SpillEstimate++;
    if (produceCompactUnwindFrame(MF) && !SavedRegs.test(PairedReg))
      SpillEstimate++;
  }
  SpillEstimate += 2; // Conservatively include FP+LR in the estimate
  unsigned StackEstimate = MFI.estimateStackSize(MF) + 8 * SpillEstimate;

  // The frame record needs to be created by saving the appropriate registers.
  if (hasFP(MF) || windowsRequiresStackProbe(MF, StackEstimate)) {
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);
  }

  unsigned ExtraCSSpill = 0;
  // Figure out which callee-saved registers to save/restore.
  for (unsigned i = 0; CSRegs[i]; ++i) {
    const unsigned Reg = CSRegs[i];

    // Add the base pointer register to SavedRegs if it is callee-save.
    if (Reg == BasePointerReg)
      SavedRegs.set(Reg);

    bool RegUsed = SavedRegs.test(Reg);
    unsigned PairedReg = CSRegs[i ^ 1];
    if (!RegUsed) {
      if (AArch64::GPR64RegClass.contains(Reg) &&
          !RegInfo->isReservedReg(MF, Reg)) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
      continue;
    }

    // MachO's compact unwind format relies on all registers being stored in
    // pairs.
    // FIXME: the usual format is actually better if unwinding isn't needed.
    if (produceCompactUnwindFrame(MF) && !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !RegInfo->isReservedReg(MF, PairedReg))
        ExtraCSSpill = PairedReg;
    }
  }

  DEBUG(dbgs() << "*** determineCalleeSaves\nUsed CSRs:";
        for (unsigned Reg : SavedRegs.set_bits())
          dbgs() << ' ' << printReg(Reg, RegInfo);
        dbgs() << "\n";);

  // If any callee-saved registers are used, the frame cannot be eliminated.
  unsigned NumRegsSpilled = SavedRegs.count();
  bool CanEliminateFrame = NumRegsSpilled == 0;

  // The CSR spill slots have not been allocated yet, so estimateStackSize
  // won't include them.
  unsigned CFSize = MFI.estimateStackSize(MF) + 8 * NumRegsSpilled;
  DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
  unsigned EstimatedStackSizeLimit = estimateRSStackSizeLimit(MF);
  bool BigStack = (CFSize > EstimatedStackSizeLimit);
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);

  // Estimate if we might need to scavenge a register at some point in order
  // to materialize a stack offset. If so, either spill one additional
  // callee-saved register or reserve a special spill slot to facilitate
  // register scavenging. If we already spilled an extra callee-saved register
  // above to keep the number of spills even, we don't need to do anything else
  // here.
  if (BigStack) {
    if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
      DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
                   << " to get a scratch register.\n");
      SavedRegs.set(UnspilledCSGPR);
      // MachO's compact unwind format relies on all registers being stored in
      // pairs, so if we need to spill one extra for BigStack, then we need to
      // store the pair.
      if (produceCompactUnwindFrame(MF))
        SavedRegs.set(UnspilledCSGPRPaired);
      // Record the register that was actually added to SavedRegs; the paired
      // register is only saved for compact unwind and may otherwise be absent.
      ExtraCSSpill = UnspilledCSGPR;
      NumRegsSpilled = SavedRegs.count();
    }

    // If we didn't find an extra callee-saved register to spill, or the one
    // we found is already in use in the function, create an emergency spill
    // slot.
    if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) {
      const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
      const TargetRegisterClass &RC = AArch64::GPR64RegClass;
      unsigned Size = TRI->getSpillSize(RC);
      unsigned Align = TRI->getSpillAlignment(RC);
      int FI = MFI.CreateStackObject(Size, Align, false);
      RS->addScavengingFrameIndex(FI);
      DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                   << " as the emergency spill slot.\n");
    }
  }

  // Round up to register pair alignment to avoid additional SP adjustment
  // instructions.
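  // For example, an odd spill count of 3 registers (24 bytes) is rounded up
  // to 32 bytes.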
  AFI->setCalleeSavedStackSize(alignTo(8 * NumRegsSpilled, 16));
}
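
// Stack slot scavenging can reuse the 8-byte pad that
// computeCalleeSaveRegisterPairs creates when an odd number of registers is
// saved (see setCalleeSaveStackHasFreeSpace).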
bool AArch64FrameLowering::enableStackSlotScavenging(
    const MachineFunction &MF) const {
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  return AFI->hasCalleeSaveStackFreeSpace();
}