1 //===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the X86 implementation of TargetFrameLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "X86FrameLowering.h"
14 #include "X86InstrBuilder.h"
15 #include "X86InstrInfo.h"
16 #include "X86MachineFunctionInfo.h"
17 #include "X86Subtarget.h"
18 #include "X86TargetMachine.h"
19 #include "llvm/ADT/SmallSet.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/EHPersonalities.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineModuleInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/WinEHFuncInfo.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/MC/MCAsmInfo.h"
31 #include "llvm/MC/MCObjectFileInfo.h"
32 #include "llvm/MC/MCSymbol.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Target/TargetOptions.h"
35 #include <cstdlib>
36 
37 #define DEBUG_TYPE "x86-fl"
38 
39 STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue");
40 STATISTIC(NumFrameExtraProbe,
41           "Number of extra stack probes generated in prologue");
42 
43 using namespace llvm;
44 
45 X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
46                                    MaybeAlign StackAlignOverride)
47     : TargetFrameLowering(StackGrowsDown, StackAlignOverride.valueOrOne(),
48                           STI.is64Bit() ? -8 : -4),
49       STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
50   // Cache a bunch of frame-related predicates for this subtarget.
51   SlotSize = TRI->getSlotSize();
52   Is64Bit = STI.is64Bit();
53   IsLP64 = STI.isTarget64BitLP64();
54   // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
55   Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
56   StackPtr = TRI->getStackRegister();
57 }
58 
59 bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
60   return !MF.getFrameInfo().hasVarSizedObjects() &&
61          !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences() &&
62          !MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall();
63 }
64 
65 /// canSimplifyCallFramePseudos - If there is a reserved call frame, the
66 /// call frame pseudos can be simplified.  Having a FP, as in the default
67 /// implementation, is not sufficient here since we can't always use it.
68 /// Use a more nuanced condition.
69 bool
70 X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
71   return hasReservedCallFrame(MF) ||
72          MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
73          (hasFP(MF) && !TRI->hasStackRealignment(MF)) ||
74          TRI->hasBasePointer(MF);
75 }
76 
77 // needsFrameIndexResolution - Do we need to perform FI resolution for
78 // this function. Normally, this is required only when the function
79 // has any stack objects. However, FI resolution actually has another job,
80 // not apparent from the title - it resolves callframesetup/destroy
81 // that were not simplified earlier.
82 // So, this is required for x86 functions that have push sequences even
83 // when there are no stack objects.
84 bool
85 X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
86   return MF.getFrameInfo().hasStackObjects() ||
87          MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
88 }
89 
90 /// hasFP - Return true if the specified function should have a dedicated frame
91 /// pointer register.  This is true if the function has variable sized allocas
92 /// or if frame pointer elimination is disabled.
93 bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
94   const MachineFrameInfo &MFI = MF.getFrameInfo();
95   return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
96           TRI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
97           MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() ||
98           MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
99           MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
100           MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
101           MFI.hasStackMap() || MFI.hasPatchPoint() ||
102           MFI.hasCopyImplyingStackAdjustment());
103 }
104 
105 static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
106   if (IsLP64) {
107     if (isInt<8>(Imm))
108       return X86::SUB64ri8;
109     return X86::SUB64ri32;
110   } else {
111     if (isInt<8>(Imm))
112       return X86::SUB32ri8;
113     return X86::SUB32ri;
114   }
115 }
116 
117 static unsigned getADDriOpcode(bool IsLP64, int64_t Imm) {
118   if (IsLP64) {
119     if (isInt<8>(Imm))
120       return X86::ADD64ri8;
121     return X86::ADD64ri32;
122   } else {
123     if (isInt<8>(Imm))
124       return X86::ADD32ri8;
125     return X86::ADD32ri;
126   }
127 }
128 
129 static unsigned getSUBrrOpcode(bool IsLP64) {
130   return IsLP64 ? X86::SUB64rr : X86::SUB32rr;
131 }
132 
133 static unsigned getADDrrOpcode(bool IsLP64) {
134   return IsLP64 ? X86::ADD64rr : X86::ADD32rr;
135 }
136 
137 static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
138   if (IsLP64) {
139     if (isInt<8>(Imm))
140       return X86::AND64ri8;
141     return X86::AND64ri32;
142   }
143   if (isInt<8>(Imm))
144     return X86::AND32ri8;
145   return X86::AND32ri;
146 }
147 
148 static unsigned getLEArOpcode(bool IsLP64) {
149   return IsLP64 ? X86::LEA64r : X86::LEA32r;
150 }
151 
152 static bool isEAXLiveIn(MachineBasicBlock &MBB) {
153   for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
154     unsigned Reg = RegMask.PhysReg;
155 
156     if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
157         Reg == X86::AH || Reg == X86::AL)
158       return true;
159   }
160 
161   return false;
162 }
163 
164 /// Check if the flags need to be preserved before the terminators.
165 /// This would be the case, if the eflags is live-in of the region
166 /// composed by the terminators or live-out of that region, without
167 /// being defined by a terminator.
168 static bool
169 flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
170   for (const MachineInstr &MI : MBB.terminators()) {
171     bool BreakNext = false;
172     for (const MachineOperand &MO : MI.operands()) {
173       if (!MO.isReg())
174         continue;
175       Register Reg = MO.getReg();
176       if (Reg != X86::EFLAGS)
177         continue;
178 
179       // This terminator needs an eflags that is not defined
180       // by a previous another terminator:
181       // EFLAGS is live-in of the region composed by the terminators.
182       if (!MO.isDef())
183         return true;
184       // This terminator defines the eflags, i.e., we don't need to preserve it.
185       // However, we still need to check this specific terminator does not
186       // read a live-in value.
187       BreakNext = true;
188     }
189     // We found a definition of the eflags, no need to preserve them.
190     if (BreakNext)
191       return false;
192   }
193 
194   // None of the terminators use or define the eflags.
195   // Check if they are live-out, that would imply we need to preserve them.
196   for (const MachineBasicBlock *Succ : MBB.successors())
197     if (Succ->isLiveIn(X86::EFLAGS))
198       return true;
199 
200   return false;
201 }
202 
203 /// emitSPUpdate - Emit a series of instructions to increment / decrement the
204 /// stack pointer by a constant value.
205 void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
206                                     MachineBasicBlock::iterator &MBBI,
207                                     const DebugLoc &DL,
208                                     int64_t NumBytes, bool InEpilogue) const {
209   bool isSub = NumBytes < 0;
210   uint64_t Offset = isSub ? -NumBytes : NumBytes;
211   MachineInstr::MIFlag Flag =
212       isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy;
213 
214   uint64_t Chunk = (1LL << 31) - 1;
215 
216   MachineFunction &MF = *MBB.getParent();
217   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
218   const X86TargetLowering &TLI = *STI.getTargetLowering();
219   const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);
220 
221   // It's ok to not take into account large chunks when probing, as the
222   // allocation is split in smaller chunks anyway.
223   if (EmitInlineStackProbe && !InEpilogue) {
224 
225     // This pseudo-instruction is going to be expanded, potentially using a
226     // loop, by inlineStackProbe().
227     BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)).addImm(Offset);
228     return;
229   } else if (Offset > Chunk) {
230     // Rather than emit a long series of instructions for large offsets,
231     // load the offset into a register and do one sub/add
232     unsigned Reg = 0;
233     unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
234 
235     if (isSub && !isEAXLiveIn(MBB))
236       Reg = Rax;
237     else
238       Reg = TRI->findDeadCallerSavedReg(MBB, MBBI);
239 
240     unsigned MovRIOpc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
241     unsigned AddSubRROpc =
242         isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
243     if (Reg) {
244       BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Reg)
245           .addImm(Offset)
246           .setMIFlag(Flag);
247       MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
248                              .addReg(StackPtr)
249                              .addReg(Reg);
250       MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
251       return;
252     } else if (Offset > 8 * Chunk) {
253       // If we would need more than 8 add or sub instructions (a >16GB stack
254       // frame), it's worth spilling RAX to materialize this immediate.
255       //   pushq %rax
256       //   movabsq +-$Offset+-SlotSize, %rax
257       //   addq %rsp, %rax
258       //   xchg %rax, (%rsp)
259       //   movq (%rsp), %rsp
260       assert(Is64Bit && "can't have 32-bit 16GB stack frame");
261       BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
262           .addReg(Rax, RegState::Kill)
263           .setMIFlag(Flag);
264       // Subtract is not commutative, so negate the offset and always use add.
265       // Subtract 8 less and add 8 more to account for the PUSH we just did.
266       if (isSub)
267         Offset = -(Offset - SlotSize);
268       else
269         Offset = Offset + SlotSize;
270       BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Rax)
271           .addImm(Offset)
272           .setMIFlag(Flag);
273       MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
274                              .addReg(Rax)
275                              .addReg(StackPtr);
276       MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
277       // Exchange the new SP in RAX with the top of the stack.
278       addRegOffset(
279           BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax),
280           StackPtr, false, 0);
281       // Load new SP from the top of the stack into RSP.
282       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr),
283                    StackPtr, false, 0);
284       return;
285     }
286   }
287 
288   while (Offset) {
289     uint64_t ThisVal = std::min(Offset, Chunk);
290     if (ThisVal == SlotSize) {
291       // Use push / pop for slot sized adjustments as a size optimization. We
292       // need to find a dead register when using pop.
293       unsigned Reg = isSub
294         ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
295         : TRI->findDeadCallerSavedReg(MBB, MBBI);
296       if (Reg) {
297         unsigned Opc = isSub
298           ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
299           : (Is64Bit ? X86::POP64r  : X86::POP32r);
300         BuildMI(MBB, MBBI, DL, TII.get(Opc))
301             .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
302             .setMIFlag(Flag);
303         Offset -= ThisVal;
304         continue;
305       }
306     }
307 
308     BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
309         .setMIFlag(Flag);
310 
311     Offset -= ThisVal;
312   }
313 }
314 
315 MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
316     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
317     const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
318   assert(Offset != 0 && "zero offset stack adjustment requested");
319 
320   // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
321   // is tricky.
322   bool UseLEA;
323   if (!InEpilogue) {
324     // Check if inserting the prologue at the beginning
325     // of MBB would require to use LEA operations.
326     // We need to use LEA operations if EFLAGS is live in, because
327     // it means an instruction will read it before it gets defined.
328     UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
329   } else {
330     // If we can use LEA for SP but we shouldn't, check that none
331     // of the terminators uses the eflags. Otherwise we will insert
332     // a ADD that will redefine the eflags and break the condition.
333     // Alternatively, we could move the ADD, but this may not be possible
334     // and is an optimization anyway.
335     UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
336     if (UseLEA && !STI.useLeaForSP())
337       UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB);
338     // If that assert breaks, that means we do not do the right thing
339     // in canUseAsEpilogue.
340     assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) &&
341            "We shouldn't have allowed this insertion point");
342   }
343 
344   MachineInstrBuilder MI;
345   if (UseLEA) {
346     MI = addRegOffset(BuildMI(MBB, MBBI, DL,
347                               TII.get(getLEArOpcode(Uses64BitFramePtr)),
348                               StackPtr),
349                       StackPtr, false, Offset);
350   } else {
351     bool IsSub = Offset < 0;
352     uint64_t AbsOffset = IsSub ? -Offset : Offset;
353     const unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
354                                : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
355     MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
356              .addReg(StackPtr)
357              .addImm(AbsOffset);
358     MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
359   }
360   return MI;
361 }
362 
363 int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
364                                      MachineBasicBlock::iterator &MBBI,
365                                      bool doMergeWithPrevious) const {
366   if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
367       (!doMergeWithPrevious && MBBI == MBB.end()))
368     return 0;
369 
370   MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
371 
372   PI = skipDebugInstructionsBackward(PI, MBB.begin());
373   // It is assumed that ADD/SUB/LEA instruction is succeded by one CFI
374   // instruction, and that there are no DBG_VALUE or other instructions between
375   // ADD/SUB/LEA and its corresponding CFI instruction.
376   /* TODO: Add support for the case where there are multiple CFI instructions
377     below the ADD/SUB/LEA, e.g.:
378     ...
379     add
380     cfi_def_cfa_offset
381     cfi_offset
382     ...
383   */
384   if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
385     PI = std::prev(PI);
386 
387   unsigned Opc = PI->getOpcode();
388   int Offset = 0;
389 
390   if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
391        Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
392       PI->getOperand(0).getReg() == StackPtr){
393     assert(PI->getOperand(1).getReg() == StackPtr);
394     Offset = PI->getOperand(2).getImm();
395   } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
396              PI->getOperand(0).getReg() == StackPtr &&
397              PI->getOperand(1).getReg() == StackPtr &&
398              PI->getOperand(2).getImm() == 1 &&
399              PI->getOperand(3).getReg() == X86::NoRegister &&
400              PI->getOperand(5).getReg() == X86::NoRegister) {
401     // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
402     Offset = PI->getOperand(4).getImm();
403   } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
404               Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
405              PI->getOperand(0).getReg() == StackPtr) {
406     assert(PI->getOperand(1).getReg() == StackPtr);
407     Offset = -PI->getOperand(2).getImm();
408   } else
409     return 0;
410 
411   PI = MBB.erase(PI);
412   if (PI != MBB.end() && PI->isCFIInstruction()) {
413     auto CIs = MBB.getParent()->getFrameInstructions();
414     MCCFIInstruction CI = CIs[PI->getOperand(0).getCFIIndex()];
415     if (CI.getOperation() == MCCFIInstruction::OpDefCfaOffset ||
416         CI.getOperation() == MCCFIInstruction::OpAdjustCfaOffset)
417       PI = MBB.erase(PI);
418   }
419   if (!doMergeWithPrevious)
420     MBBI = skipDebugInstructionsForward(PI, MBB.end());
421 
422   return Offset;
423 }
424 
425 void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
426                                 MachineBasicBlock::iterator MBBI,
427                                 const DebugLoc &DL,
428                                 const MCCFIInstruction &CFIInst) const {
429   MachineFunction &MF = *MBB.getParent();
430   unsigned CFIIndex = MF.addFrameInst(CFIInst);
431   BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
432       .addCFIIndex(CFIIndex);
433 }
434 
435 /// Emits Dwarf Info specifying offsets of callee saved registers and
436 /// frame pointer. This is called only when basic block sections are enabled.
437 void X86FrameLowering::emitCalleeSavedFrameMoves(
438     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
439   MachineFunction &MF = *MBB.getParent();
440   if (!hasFP(MF)) {
441     emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
442     return;
443   }
444   const MachineModuleInfo &MMI = MF.getMMI();
445   const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
446   const Register FramePtr = TRI->getFrameRegister(MF);
447   const Register MachineFramePtr =
448       STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64))
449                                : FramePtr;
450   unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true);
451   // Offset = space for return address + size of the frame pointer itself.
452   unsigned Offset = (Is64Bit ? 8 : 4) + (Uses64BitFramePtr ? 8 : 4);
453   BuildCFI(MBB, MBBI, DebugLoc{},
454            MCCFIInstruction::createOffset(nullptr, DwarfReg, -Offset));
455   emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
456 }
457 
458 void X86FrameLowering::emitCalleeSavedFrameMoves(
459     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
460     const DebugLoc &DL, bool IsPrologue) const {
461   MachineFunction &MF = *MBB.getParent();
462   MachineFrameInfo &MFI = MF.getFrameInfo();
463   MachineModuleInfo &MMI = MF.getMMI();
464   const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
465 
466   // Add callee saved registers to move list.
467   const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
468   if (CSI.empty()) return;
469 
470   // Calculate offsets.
471   for (std::vector<CalleeSavedInfo>::const_iterator
472          I = CSI.begin(), E = CSI.end(); I != E; ++I) {
473     int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
474     unsigned Reg = I->getReg();
475     unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
476 
477     if (IsPrologue) {
478       BuildCFI(MBB, MBBI, DL,
479                MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
480     } else {
481       BuildCFI(MBB, MBBI, DL,
482                MCCFIInstruction::createRestore(nullptr, DwarfReg));
483     }
484   }
485 }
486 
487 void X86FrameLowering::emitStackProbe(MachineFunction &MF,
488                                       MachineBasicBlock &MBB,
489                                       MachineBasicBlock::iterator MBBI,
490                                       const DebugLoc &DL, bool InProlog) const {
491   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
492   if (STI.isTargetWindowsCoreCLR()) {
493     if (InProlog) {
494       BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING))
495           .addImm(0 /* no explicit stack size */);
496     } else {
497       emitStackProbeInline(MF, MBB, MBBI, DL, false);
498     }
499   } else {
500     emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
501   }
502 }
503 
504 void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
505                                         MachineBasicBlock &PrologMBB) const {
506   auto Where = llvm::find_if(PrologMBB, [](MachineInstr &MI) {
507     return MI.getOpcode() == X86::STACKALLOC_W_PROBING;
508   });
509   if (Where != PrologMBB.end()) {
510     DebugLoc DL = PrologMBB.findDebugLoc(Where);
511     emitStackProbeInline(MF, PrologMBB, Where, DL, true);
512     Where->eraseFromParent();
513   }
514 }
515 
516 void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
517                                             MachineBasicBlock &MBB,
518                                             MachineBasicBlock::iterator MBBI,
519                                             const DebugLoc &DL,
520                                             bool InProlog) const {
521   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
522   if (STI.isTargetWindowsCoreCLR() && STI.is64Bit())
523     emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog);
524   else
525     emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog);
526 }
527 
528 void X86FrameLowering::emitStackProbeInlineGeneric(
529     MachineFunction &MF, MachineBasicBlock &MBB,
530     MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
531   MachineInstr &AllocWithProbe = *MBBI;
532   uint64_t Offset = AllocWithProbe.getOperand(0).getImm();
533 
534   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
535   const X86TargetLowering &TLI = *STI.getTargetLowering();
536   assert(!(STI.is64Bit() && STI.isTargetWindowsCoreCLR()) &&
537          "different expansion expected for CoreCLR 64 bit");
538 
539   const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
540   uint64_t ProbeChunk = StackProbeSize * 8;
541 
542   uint64_t MaxAlign =
543       TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;
544 
545   // Synthesize a loop or unroll it, depending on the number of iterations.
546   // BuildStackAlignAND ensures that only MaxAlign % StackProbeSize bits left
547   // between the unaligned rsp and current rsp.
548   if (Offset > ProbeChunk) {
549     emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset,
550                                     MaxAlign % StackProbeSize);
551   } else {
552     emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset,
553                                      MaxAlign % StackProbeSize);
554   }
555 }
556 
557 void X86FrameLowering::emitStackProbeInlineGenericBlock(
558     MachineFunction &MF, MachineBasicBlock &MBB,
559     MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
560     uint64_t AlignOffset) const {
561 
562   const bool NeedsDwarfCFI = needsDwarfCFI(MF);
563   const bool HasFP = hasFP(MF);
564   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
565   const X86TargetLowering &TLI = *STI.getTargetLowering();
566   const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset);
567   const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
568   const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
569 
570   uint64_t CurrentOffset = 0;
571 
572   assert(AlignOffset < StackProbeSize);
573 
574   // If the offset is so small it fits within a page, there's nothing to do.
575   if (StackProbeSize < Offset + AlignOffset) {
576 
577     MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
578                            .addReg(StackPtr)
579                            .addImm(StackProbeSize - AlignOffset)
580                            .setMIFlag(MachineInstr::FrameSetup);
581     if (!HasFP && NeedsDwarfCFI) {
582       BuildCFI(MBB, MBBI, DL,
583                MCCFIInstruction::createAdjustCfaOffset(
584                    nullptr, StackProbeSize - AlignOffset));
585     }
586     MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
587 
588     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
589                      .setMIFlag(MachineInstr::FrameSetup),
590                  StackPtr, false, 0)
591         .addImm(0)
592         .setMIFlag(MachineInstr::FrameSetup);
593     NumFrameExtraProbe++;
594     CurrentOffset = StackProbeSize - AlignOffset;
595   }
596 
597   // For the next N - 1 pages, just probe. I tried to take advantage of
598   // natural probes but it implies much more logic and there was very few
599   // interesting natural probes to interleave.
600   while (CurrentOffset + StackProbeSize < Offset) {
601     MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
602                            .addReg(StackPtr)
603                            .addImm(StackProbeSize)
604                            .setMIFlag(MachineInstr::FrameSetup);
605     MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
606 
607     if (!HasFP && NeedsDwarfCFI) {
608       BuildCFI(
609           MBB, MBBI, DL,
610           MCCFIInstruction::createAdjustCfaOffset(nullptr, StackProbeSize));
611     }
612     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
613                      .setMIFlag(MachineInstr::FrameSetup),
614                  StackPtr, false, 0)
615         .addImm(0)
616         .setMIFlag(MachineInstr::FrameSetup);
617     NumFrameExtraProbe++;
618     CurrentOffset += StackProbeSize;
619   }
620 
621   // No need to probe the tail, it is smaller than a Page.
622   uint64_t ChunkSize = Offset - CurrentOffset;
623   MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
624                          .addReg(StackPtr)
625                          .addImm(ChunkSize)
626                          .setMIFlag(MachineInstr::FrameSetup);
627   // No need to adjust Dwarf CFA offset here, the last position of the stack has
628   // been defined
629   MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
630 }
631 
632 void X86FrameLowering::emitStackProbeInlineGenericLoop(
633     MachineFunction &MF, MachineBasicBlock &MBB,
634     MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
635     uint64_t AlignOffset) const {
636   assert(Offset && "null offset");
637 
638   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
639   const X86TargetLowering &TLI = *STI.getTargetLowering();
640   const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
641   const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
642 
643   if (AlignOffset) {
644     if (AlignOffset < StackProbeSize) {
645       // Perform a first smaller allocation followed by a probe.
646       const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, AlignOffset);
647       MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), StackPtr)
648                              .addReg(StackPtr)
649                              .addImm(AlignOffset)
650                              .setMIFlag(MachineInstr::FrameSetup);
651       MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
652 
653       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
654                        .setMIFlag(MachineInstr::FrameSetup),
655                    StackPtr, false, 0)
656           .addImm(0)
657           .setMIFlag(MachineInstr::FrameSetup);
658       NumFrameExtraProbe++;
659       Offset -= AlignOffset;
660     }
661   }
662 
663   // Synthesize a loop
664   NumFrameLoopProbe++;
665   const BasicBlock *LLVM_BB = MBB.getBasicBlock();
666 
667   MachineBasicBlock *testMBB = MF.CreateMachineBasicBlock(LLVM_BB);
668   MachineBasicBlock *tailMBB = MF.CreateMachineBasicBlock(LLVM_BB);
669 
670   MachineFunction::iterator MBBIter = ++MBB.getIterator();
671   MF.insert(MBBIter, testMBB);
672   MF.insert(MBBIter, tailMBB);
673 
674   Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 : X86::R11D;
675   BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
676       .addReg(StackPtr)
677       .setMIFlag(MachineInstr::FrameSetup);
678 
679   // save loop bound
680   {
681     const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, Offset);
682     BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed)
683         .addReg(FinalStackProbed)
684         .addImm(Offset / StackProbeSize * StackProbeSize)
685         .setMIFlag(MachineInstr::FrameSetup);
686   }
687 
688   // allocate a page
689   {
690     const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
691     BuildMI(testMBB, DL, TII.get(SUBOpc), StackPtr)
692         .addReg(StackPtr)
693         .addImm(StackProbeSize)
694         .setMIFlag(MachineInstr::FrameSetup);
695   }
696 
697   // touch the page
698   addRegOffset(BuildMI(testMBB, DL, TII.get(MovMIOpc))
699                    .setMIFlag(MachineInstr::FrameSetup),
700                StackPtr, false, 0)
701       .addImm(0)
702       .setMIFlag(MachineInstr::FrameSetup);
703 
704   // cmp with stack pointer bound
705   BuildMI(testMBB, DL, TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
706       .addReg(StackPtr)
707       .addReg(FinalStackProbed)
708       .setMIFlag(MachineInstr::FrameSetup);
709 
710   // jump
711   BuildMI(testMBB, DL, TII.get(X86::JCC_1))
712       .addMBB(testMBB)
713       .addImm(X86::COND_NE)
714       .setMIFlag(MachineInstr::FrameSetup);
715   testMBB->addSuccessor(testMBB);
716   testMBB->addSuccessor(tailMBB);
717 
718   // BB management
719   tailMBB->splice(tailMBB->end(), &MBB, MBBI, MBB.end());
720   tailMBB->transferSuccessorsAndUpdatePHIs(&MBB);
721   MBB.addSuccessor(testMBB);
722 
723   // handle tail
724   unsigned TailOffset = Offset % StackProbeSize;
725   if (TailOffset) {
726     const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, TailOffset);
727     BuildMI(*tailMBB, tailMBB->begin(), DL, TII.get(Opc), StackPtr)
728         .addReg(StackPtr)
729         .addImm(TailOffset)
730         .setMIFlag(MachineInstr::FrameSetup);
731   }
732 
733   // Update Live In information
734   recomputeLiveIns(*testMBB);
735   recomputeLiveIns(*tailMBB);
736 }
737 
738 void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
739     MachineFunction &MF, MachineBasicBlock &MBB,
740     MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
741   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
742   assert(STI.is64Bit() && "different expansion needed for 32 bit");
743   assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
744   const TargetInstrInfo &TII = *STI.getInstrInfo();
745   const BasicBlock *LLVM_BB = MBB.getBasicBlock();
746 
747   // RAX contains the number of bytes of desired stack adjustment.
748   // The handling here assumes this value has already been updated so as to
749   // maintain stack alignment.
750   //
751   // We need to exit with RSP modified by this amount and execute suitable
752   // page touches to notify the OS that we're growing the stack responsibly.
753   // All stack probing must be done without modifying RSP.
754   //
755   // MBB:
756   //    SizeReg = RAX;
757   //    ZeroReg = 0
758   //    CopyReg = RSP
759   //    Flags, TestReg = CopyReg - SizeReg
760   //    FinalReg = !Flags.Ovf ? TestReg : ZeroReg
761   //    LimitReg = gs magic thread env access
762   //    if FinalReg >= LimitReg goto ContinueMBB
763   // RoundBB:
764   //    RoundReg = page address of FinalReg
765   // LoopMBB:
766   //    LoopReg = PHI(LimitReg,ProbeReg)
767   //    ProbeReg = LoopReg - PageSize
768   //    [ProbeReg] = 0
769   //    if (ProbeReg > RoundReg) goto LoopMBB
770   // ContinueMBB:
771   //    RSP = RSP - RAX
772   //    [rest of original MBB]
773 
774   // Set up the new basic blocks
775   MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB);
776   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
777   MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB);
778 
779   MachineFunction::iterator MBBIter = std::next(MBB.getIterator());
780   MF.insert(MBBIter, RoundMBB);
781   MF.insert(MBBIter, LoopMBB);
782   MF.insert(MBBIter, ContinueMBB);
783 
784   // Split MBB and move the tail portion down to ContinueMBB.
785   MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI);
786   ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end());
787   ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB);
788 
789   // Some useful constants
790   const int64_t ThreadEnvironmentStackLimit = 0x10;
791   const int64_t PageSize = 0x1000;
792   const int64_t PageMask = ~(PageSize - 1);
793 
794   // Registers we need. For the normal case we use virtual
795   // registers. For the prolog expansion we use RAX, RCX and RDX.
796   MachineRegisterInfo &MRI = MF.getRegInfo();
797   const TargetRegisterClass *RegClass = &X86::GR64RegClass;
798   const Register SizeReg = InProlog ? X86::RAX
799                                     : MRI.createVirtualRegister(RegClass),
800                  ZeroReg = InProlog ? X86::RCX
801                                     : MRI.createVirtualRegister(RegClass),
802                  CopyReg = InProlog ? X86::RDX
803                                     : MRI.createVirtualRegister(RegClass),
804                  TestReg = InProlog ? X86::RDX
805                                     : MRI.createVirtualRegister(RegClass),
806                  FinalReg = InProlog ? X86::RDX
807                                      : MRI.createVirtualRegister(RegClass),
808                  RoundedReg = InProlog ? X86::RDX
809                                        : MRI.createVirtualRegister(RegClass),
810                  LimitReg = InProlog ? X86::RCX
811                                      : MRI.createVirtualRegister(RegClass),
812                  JoinReg = InProlog ? X86::RCX
813                                     : MRI.createVirtualRegister(RegClass),
814                  ProbeReg = InProlog ? X86::RCX
815                                      : MRI.createVirtualRegister(RegClass);
816 
817   // SP-relative offsets where we can save RCX and RDX.
818   int64_t RCXShadowSlot = 0;
819   int64_t RDXShadowSlot = 0;
820 
821   // If inlining in the prolog, save RCX and RDX.
822   if (InProlog) {
823     // Compute the offsets. We need to account for things already
824     // pushed onto the stack at this point: return address, frame
825     // pointer (if used), and callee saves.
826     X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
827     const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize();
828     const bool HasFP = hasFP(MF);
829 
830     // Check if we need to spill RCX and/or RDX.
831     // Here we assume that no earlier prologue instruction changes RCX and/or
832     // RDX, so checking the block live-ins is enough.
833     const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX);
834     const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX);
835     int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
836     // Assign the initial slot to both registers, then change RDX's slot if both
837     // need to be spilled.
838     if (IsRCXLiveIn)
839       RCXShadowSlot = InitSlot;
840     if (IsRDXLiveIn)
841       RDXShadowSlot = InitSlot;
842     if (IsRDXLiveIn && IsRCXLiveIn)
843       RDXShadowSlot += 8;
844     // Emit the saves if needed.
845     if (IsRCXLiveIn)
846       addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
847                    RCXShadowSlot)
848           .addReg(X86::RCX);
849     if (IsRDXLiveIn)
850       addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
851                    RDXShadowSlot)
852           .addReg(X86::RDX);
853   } else {
854     // Not in the prolog. Copy RAX to a virtual reg.
855     BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX);
856   }
857 
858   // Add code to MBB to check for overflow and set the new target stack pointer
859   // to zero if so.
860   BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg)
861       .addReg(ZeroReg, RegState::Undef)
862       .addReg(ZeroReg, RegState::Undef);
863   BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP);
864   BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
865       .addReg(CopyReg)
866       .addReg(SizeReg);
867   BuildMI(&MBB, DL, TII.get(X86::CMOV64rr), FinalReg)
868       .addReg(TestReg)
869       .addReg(ZeroReg)
870       .addImm(X86::COND_B);
871 
872   // FinalReg now holds final stack pointer value, or zero if
873   // allocation would overflow. Compare against the current stack
874   // limit from the thread environment block. Note this limit is the
875   // lowest touched page on the stack, not the point at which the OS
876   // will cause an overflow exception, so this is just an optimization
877   // to avoid unnecessarily touching pages that are below the current
878   // SP but already committed to the stack by the OS.
879   BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
880       .addReg(0)
881       .addImm(1)
882       .addReg(0)
883       .addImm(ThreadEnvironmentStackLimit)
884       .addReg(X86::GS);
885   BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
886   // Jump if the desired stack pointer is at or above the stack limit.
887   BuildMI(&MBB, DL, TII.get(X86::JCC_1)).addMBB(ContinueMBB).addImm(X86::COND_AE);
888 
889   // Add code to roundMBB to round the final stack pointer to a page boundary.
890   RoundMBB->addLiveIn(FinalReg);
891   BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
892       .addReg(FinalReg)
893       .addImm(PageMask);
894   BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB);
895 
896   // LimitReg now holds the current stack limit, RoundedReg page-rounded
897   // final RSP value. Add code to loopMBB to decrement LimitReg page-by-page
898   // and probe until we reach RoundedReg.
899   if (!InProlog) {
900     BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
901         .addReg(LimitReg)
902         .addMBB(RoundMBB)
903         .addReg(ProbeReg)
904         .addMBB(LoopMBB);
905   }
906 
907   LoopMBB->addLiveIn(JoinReg);
908   addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg,
909                false, -PageSize);
910 
911   // Probe by storing a byte onto the stack.
912   BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi))
913       .addReg(ProbeReg)
914       .addImm(1)
915       .addReg(0)
916       .addImm(0)
917       .addReg(0)
918       .addImm(0);
919 
920   LoopMBB->addLiveIn(RoundedReg);
921   BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
922       .addReg(RoundedReg)
923       .addReg(ProbeReg);
924   BuildMI(LoopMBB, DL, TII.get(X86::JCC_1)).addMBB(LoopMBB).addImm(X86::COND_NE);
925 
926   MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();
927 
928   // If in prolog, restore RDX and RCX.
929   if (InProlog) {
930     if (RCXShadowSlot) // It means we spilled RCX in the prologue.
931       addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
932                            TII.get(X86::MOV64rm), X86::RCX),
933                    X86::RSP, false, RCXShadowSlot);
934     if (RDXShadowSlot) // It means we spilled RDX in the prologue.
935       addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
936                            TII.get(X86::MOV64rm), X86::RDX),
937                    X86::RSP, false, RDXShadowSlot);
938   }
939 
940   // Now that the probing is done, add code to continueMBB to update
941   // the stack pointer for real.
942   ContinueMBB->addLiveIn(SizeReg);
943   BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
944       .addReg(X86::RSP)
945       .addReg(SizeReg);
946 
947   // Add the control flow edges we need.
948   MBB.addSuccessor(ContinueMBB);
949   MBB.addSuccessor(RoundMBB);
950   RoundMBB->addSuccessor(LoopMBB);
951   LoopMBB->addSuccessor(ContinueMBB);
952   LoopMBB->addSuccessor(LoopMBB);
953 
954   // Mark all the instructions added to the prolog as frame setup.
955   if (InProlog) {
956     for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
957       BeforeMBBI->setFlag(MachineInstr::FrameSetup);
958     }
959     for (MachineInstr &MI : *RoundMBB) {
960       MI.setFlag(MachineInstr::FrameSetup);
961     }
962     for (MachineInstr &MI : *LoopMBB) {
963       MI.setFlag(MachineInstr::FrameSetup);
964     }
965     for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
966          CMBBI != ContinueMBBI; ++CMBBI) {
967       CMBBI->setFlag(MachineInstr::FrameSetup);
968     }
969   }
970 }
971 
972 void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
973                                           MachineBasicBlock &MBB,
974                                           MachineBasicBlock::iterator MBBI,
975                                           const DebugLoc &DL,
976                                           bool InProlog) const {
977   bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
978 
979   // FIXME: Add indirect thunk support and remove this.
980   if (Is64Bit && IsLargeCodeModel && STI.useIndirectThunkCalls())
981     report_fatal_error("Emitting stack probe calls on 64-bit with the large "
982                        "code model and indirect thunks not yet implemented.");
983 
984   unsigned CallOp;
985   if (Is64Bit)
986     CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
987   else
988     CallOp = X86::CALLpcrel32;
989 
990   StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);
991 
992   MachineInstrBuilder CI;
993   MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);
994 
995   // All current stack probes take AX and SP as input, clobber flags, and
996   // preserve all registers. x86_64 probes leave RSP unmodified.
997   if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
998     // For the large code model, we have to call through a register. Use R11,
999     // as it is scratch in all supported calling conventions.
1000     BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
1001         .addExternalSymbol(MF.createExternalSymbolName(Symbol));
1002     CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
1003   } else {
1004     CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
1005         .addExternalSymbol(MF.createExternalSymbolName(Symbol));
1006   }
1007 
1008   unsigned AX = Uses64BitFramePtr ? X86::RAX : X86::EAX;
1009   unsigned SP = Uses64BitFramePtr ? X86::RSP : X86::ESP;
1010   CI.addReg(AX, RegState::Implicit)
1011       .addReg(SP, RegState::Implicit)
1012       .addReg(AX, RegState::Define | RegState::Implicit)
1013       .addReg(SP, RegState::Define | RegState::Implicit)
1014       .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
1015 
1016   if (STI.isTargetWin64() || !STI.isOSWindows()) {
1017     // MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves.
1018     // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
1019     // themselves. They also does not clobber %rax so we can reuse it when
1020     // adjusting %rsp.
1021     // All other platforms do not specify a particular ABI for the stack probe
1022     // function, so we arbitrarily define it to not adjust %esp/%rsp itself.
1023     BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Uses64BitFramePtr)), SP)
1024         .addReg(SP)
1025         .addReg(AX);
1026   }
1027 
1028   if (InProlog) {
1029     // Apply the frame setup flag to all inserted instrs.
1030     for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
1031       ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
1032   }
1033 }
1034 
1035 static unsigned calculateSetFPREG(uint64_t SPAdjust) {
1036   // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
1037   // and might require smaller successive adjustments.
1038   const uint64_t Win64MaxSEHOffset = 128;
1039   uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
1040   // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
1041   return SEHFrameOffset & -16;
1042 }
1043 
1044 // If we're forcing a stack realignment we can't rely on just the frame
1045 // info, we need to know the ABI stack alignment as well in case we
1046 // have a call out.  Otherwise just make sure we have some alignment - we'll
1047 // go with the minimum SlotSize.
1048 uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
1049   const MachineFrameInfo &MFI = MF.getFrameInfo();
1050   Align MaxAlign = MFI.getMaxAlign(); // Desired stack alignment.
1051   Align StackAlign = getStackAlign();
1052   if (MF.getFunction().hasFnAttribute("stackrealign")) {
1053     if (MFI.hasCalls())
1054       MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
1055     else if (MaxAlign < SlotSize)
1056       MaxAlign = Align(SlotSize);
1057   }
1058   return MaxAlign.value();
1059 }
1060 
1061 void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
1062                                           MachineBasicBlock::iterator MBBI,
1063                                           const DebugLoc &DL, unsigned Reg,
1064                                           uint64_t MaxAlign) const {
1065   uint64_t Val = -MaxAlign;
1066   unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
1067 
1068   MachineFunction &MF = *MBB.getParent();
1069   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
1070   const X86TargetLowering &TLI = *STI.getTargetLowering();
1071   const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
1072   const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);
1073 
1074   // We want to make sure that (in worst case) less than StackProbeSize bytes
1075   // are not probed after the AND. This assumption is used in
1076   // emitStackProbeInlineGeneric.
1077   if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {
1078     {
1079       NumFrameLoopProbe++;
1080       MachineBasicBlock *entryMBB =
1081           MF.CreateMachineBasicBlock(MBB.getBasicBlock());
1082       MachineBasicBlock *headMBB =
1083           MF.CreateMachineBasicBlock(MBB.getBasicBlock());
1084       MachineBasicBlock *bodyMBB =
1085           MF.CreateMachineBasicBlock(MBB.getBasicBlock());
1086       MachineBasicBlock *footMBB =
1087           MF.CreateMachineBasicBlock(MBB.getBasicBlock());
1088 
1089       MachineFunction::iterator MBBIter = MBB.getIterator();
1090       MF.insert(MBBIter, entryMBB);
1091       MF.insert(MBBIter, headMBB);
1092       MF.insert(MBBIter, bodyMBB);
1093       MF.insert(MBBIter, footMBB);
1094       const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
1095       Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 : X86::R11D;
1096 
1097       // Setup entry block
1098       {
1099 
1100         entryMBB->splice(entryMBB->end(), &MBB, MBB.begin(), MBBI);
1101         BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
1102             .addReg(StackPtr)
1103             .setMIFlag(MachineInstr::FrameSetup);
1104         MachineInstr *MI =
1105             BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed)
1106                 .addReg(FinalStackProbed)
1107                 .addImm(Val)
1108                 .setMIFlag(MachineInstr::FrameSetup);
1109 
1110         // The EFLAGS implicit def is dead.
1111         MI->getOperand(3).setIsDead();
1112 
1113         BuildMI(entryMBB, DL,
1114                 TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
1115             .addReg(FinalStackProbed)
1116             .addReg(StackPtr)
1117             .setMIFlag(MachineInstr::FrameSetup);
1118         BuildMI(entryMBB, DL, TII.get(X86::JCC_1))
1119             .addMBB(&MBB)
1120             .addImm(X86::COND_E)
1121             .setMIFlag(MachineInstr::FrameSetup);
1122         entryMBB->addSuccessor(headMBB);
1123         entryMBB->addSuccessor(&MBB);
1124       }
1125 
1126       // Loop entry block
1127 
1128       {
1129         const unsigned SUBOpc =
1130             getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
1131         BuildMI(headMBB, DL, TII.get(SUBOpc), StackPtr)
1132             .addReg(StackPtr)
1133             .addImm(StackProbeSize)
1134             .setMIFlag(MachineInstr::FrameSetup);
1135 
1136         BuildMI(headMBB, DL,
1137                 TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
1138             .addReg(FinalStackProbed)
1139             .addReg(StackPtr)
1140             .setMIFlag(MachineInstr::FrameSetup);
1141 
1142         // jump
1143         BuildMI(headMBB, DL, TII.get(X86::JCC_1))
1144             .addMBB(footMBB)
1145             .addImm(X86::COND_B)
1146             .setMIFlag(MachineInstr::FrameSetup);
1147 
1148         headMBB->addSuccessor(bodyMBB);
1149         headMBB->addSuccessor(footMBB);
1150       }
1151 
1152       // setup loop body
1153       {
1154         addRegOffset(BuildMI(bodyMBB, DL, TII.get(MovMIOpc))
1155                          .setMIFlag(MachineInstr::FrameSetup),
1156                      StackPtr, false, 0)
1157             .addImm(0)
1158             .setMIFlag(MachineInstr::FrameSetup);
1159 
1160         const unsigned SUBOpc =
1161             getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
1162         BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackPtr)
1163             .addReg(StackPtr)
1164             .addImm(StackProbeSize)
1165             .setMIFlag(MachineInstr::FrameSetup);
1166 
1167         // cmp with stack pointer bound
1168         BuildMI(bodyMBB, DL,
1169                 TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
1170             .addReg(FinalStackProbed)
1171             .addReg(StackPtr)
1172             .setMIFlag(MachineInstr::FrameSetup);
1173 
1174         // jump
1175         BuildMI(bodyMBB, DL, TII.get(X86::JCC_1))
1176             .addMBB(bodyMBB)
1177             .addImm(X86::COND_B)
1178             .setMIFlag(MachineInstr::FrameSetup);
1179         bodyMBB->addSuccessor(bodyMBB);
1180         bodyMBB->addSuccessor(footMBB);
1181       }
1182 
1183       // setup loop footer
1184       {
1185         BuildMI(footMBB, DL, TII.get(TargetOpcode::COPY), StackPtr)
1186             .addReg(FinalStackProbed)
1187             .setMIFlag(MachineInstr::FrameSetup);
1188         addRegOffset(BuildMI(footMBB, DL, TII.get(MovMIOpc))
1189                          .setMIFlag(MachineInstr::FrameSetup),
1190                      StackPtr, false, 0)
1191             .addImm(0)
1192             .setMIFlag(MachineInstr::FrameSetup);
1193         footMBB->addSuccessor(&MBB);
1194       }
1195 
1196       recomputeLiveIns(*headMBB);
1197       recomputeLiveIns(*bodyMBB);
1198       recomputeLiveIns(*footMBB);
1199       recomputeLiveIns(MBB);
1200     }
1201   } else {
1202     MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
1203                            .addReg(Reg)
1204                            .addImm(Val)
1205                            .setMIFlag(MachineInstr::FrameSetup);
1206 
1207     // The EFLAGS implicit def is dead.
1208     MI->getOperand(3).setIsDead();
1209   }
1210 }
1211 
1212 bool X86FrameLowering::has128ByteRedZone(const MachineFunction& MF) const {
1213   // x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be
1214   // clobbered by any interrupt handler.
1215   assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
1216          "MF used frame lowering for wrong subtarget");
1217   const Function &Fn = MF.getFunction();
1218   const bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
1219   return Is64Bit && !IsWin64CC && !Fn.hasFnAttribute(Attribute::NoRedZone);
1220 }
1221 
1222 bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const {
1223   return MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1224 }
1225 
1226 bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const {
1227   return !isWin64Prologue(MF) && MF.needsFrameMoves();
1228 }
1229 
1230 /// emitPrologue - Push callee-saved registers onto the stack, which
1231 /// automatically adjust the stack pointer. Adjust the stack pointer to allocate
1232 /// space for local variables. Also emit labels used by the exception handler to
1233 /// generate the exception handling frames.
1234 
1235 /*
1236   Here's a gist of what gets emitted:
1237 
1238   ; Establish frame pointer, if needed
1239   [if needs FP]
1240       push  %rbp
1241       .cfi_def_cfa_offset 16
1242       .cfi_offset %rbp, -16
1243       .seh_pushreg %rpb
1244       mov  %rsp, %rbp
1245       .cfi_def_cfa_register %rbp
1246 
1247   ; Spill general-purpose registers
1248   [for all callee-saved GPRs]
1249       pushq %<reg>
1250       [if not needs FP]
1251          .cfi_def_cfa_offset (offset from RETADDR)
1252       .seh_pushreg %<reg>
1253 
1254   ; If the required stack alignment > default stack alignment
1255   ; rsp needs to be re-aligned.  This creates a "re-alignment gap"
1256   ; of unknown size in the stack frame.
1257   [if stack needs re-alignment]
1258       and  $MASK, %rsp
1259 
1260   ; Allocate space for locals
1261   [if target is Windows and allocated space > 4096 bytes]
1262       ; Windows needs special care for allocations larger
1263       ; than one page.
1264       mov $NNN, %rax
1265       call ___chkstk_ms/___chkstk
1266       sub  %rax, %rsp
1267   [else]
1268       sub  $NNN, %rsp
1269 
1270   [if needs FP]
1271       .seh_stackalloc (size of XMM spill slots)
1272       .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
1273   [else]
1274       .seh_stackalloc NNN
1275 
1276   ; Spill XMMs
1277   ; Note, that while only Windows 64 ABI specifies XMMs as callee-preserved,
1278   ; they may get spilled on any platform, if the current function
1279   ; calls @llvm.eh.unwind.init
1280   [if needs FP]
1281       [for all callee-saved XMM registers]
1282           movaps  %<xmm reg>, -MMM(%rbp)
1283       [for all callee-saved XMM registers]
1284           .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
1285               ; i.e. the offset relative to (%rbp - SEHFrameOffset)
1286   [else]
1287       [for all callee-saved XMM registers]
1288           movaps  %<xmm reg>, KKK(%rsp)
1289       [for all callee-saved XMM registers]
1290           .seh_savexmm %<xmm reg>, KKK
1291 
1292   .seh_endprologue
1293 
1294   [if needs base pointer]
1295       mov  %rsp, %rbx
1296       [if needs to restore base pointer]
1297           mov %rsp, -MMM(%rbp)
1298 
1299   ; Emit CFI info
1300   [if needs FP]
1301       [for all callee-saved registers]
1302           .cfi_offset %<reg>, (offset from %rbp)
1303   [else]
1304        .cfi_def_cfa_offset (offset from RETADDR)
1305       [for all callee-saved registers]
1306           .cfi_offset %<reg>, (offset from %rsp)
1307 
1308   Notes:
1309   - .seh directives are emitted only for Windows 64 ABI
1310   - .cv_fpo directives are emitted on win32 when emitting CodeView
1311   - .cfi directives are emitted for all other ABIs
1312   - for 32-bit code, substitute %e?? registers for %r??
1313 */
1314 
1315 void X86FrameLowering::emitPrologue(MachineFunction &MF,
1316                                     MachineBasicBlock &MBB) const {
1317   assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
1318          "MF used frame lowering for wrong subtarget");
1319   MachineBasicBlock::iterator MBBI = MBB.begin();
1320   MachineFrameInfo &MFI = MF.getFrameInfo();
1321   const Function &Fn = MF.getFunction();
1322   MachineModuleInfo &MMI = MF.getMMI();
1323   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1324   uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
1325   uint64_t StackSize = MFI.getStackSize();    // Number of bytes to allocate.
1326   bool IsFunclet = MBB.isEHFuncletEntry();
1327   EHPersonality Personality = EHPersonality::Unknown;
1328   if (Fn.hasPersonalityFn())
1329     Personality = classifyEHPersonality(Fn.getPersonalityFn());
1330   bool FnHasClrFunclet =
1331       MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
1332   bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
1333   bool HasFP = hasFP(MF);
1334   bool IsWin64Prologue = isWin64Prologue(MF);
1335   bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
1336   // FIXME: Emit FPO data for EH funclets.
1337   bool NeedsWinFPO =
1338       !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
1339   bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
1340   bool NeedsDwarfCFI = needsDwarfCFI(MF);
1341   Register FramePtr = TRI->getFrameRegister(MF);
1342   const Register MachineFramePtr =
1343       STI.isTarget64BitILP32()
1344           ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr;
1345   Register BasePtr = TRI->getBaseRegister();
1346   bool HasWinCFI = false;
1347 
1348   // Debug location must be unknown since the first debug location is used
1349   // to determine the end of the prologue.
1350   DebugLoc DL;
1351 
1352   // Add RETADDR move area to callee saved frame size.
1353   int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1354   if (TailCallReturnAddrDelta && IsWin64Prologue)
1355     report_fatal_error("Can't handle guaranteed tail call under win64 yet");
1356 
1357   if (TailCallReturnAddrDelta < 0)
1358     X86FI->setCalleeSavedFrameSize(
1359       X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
1360 
1361   const bool EmitStackProbeCall =
1362       STI.getTargetLowering()->hasStackProbeSymbol(MF);
1363   unsigned StackProbeSize = STI.getTargetLowering()->getStackProbeSize(MF);
1364 
1365   if (HasFP && X86FI->hasSwiftAsyncContext()) {
1366     BuildMI(MBB, MBBI, DL, TII.get(X86::BTS64ri8),
1367             MachineFramePtr)
1368         .addUse(MachineFramePtr)
1369         .addImm(60)
1370         .setMIFlag(MachineInstr::FrameSetup);
1371   }
1372 
1373   // Re-align the stack on 64-bit if the x86-interrupt calling convention is
1374   // used and an error code was pushed, since the x86-64 ABI requires a 16-byte
1375   // stack alignment.
1376   if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
1377       Fn.arg_size() == 2) {
1378     StackSize += 8;
1379     MFI.setStackSize(StackSize);
1380     emitSPUpdate(MBB, MBBI, DL, -8, /*InEpilogue=*/false);
1381   }
1382 
1383   // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
1384   // function, and use up to 128 bytes of stack space, don't have a frame
1385   // pointer, calls, or dynamic alloca then we do not need to adjust the
1386   // stack pointer (we fit in the Red Zone). We also check that we don't
1387   // push and pop from the stack.
1388   if (has128ByteRedZone(MF) && !TRI->hasStackRealignment(MF) &&
1389       !MFI.hasVarSizedObjects() &&             // No dynamic alloca.
1390       !MFI.adjustsStack() &&                   // No calls.
1391       !EmitStackProbeCall &&                   // No stack probes.
1392       !MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop.
1393       !MF.shouldSplitStack()) {                // Regular stack
1394     uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
1395     if (HasFP) MinSize += SlotSize;
1396     X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
1397     StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
1398     MFI.setStackSize(StackSize);
1399   }
1400 
1401   // Insert stack pointer adjustment for later moving of return addr.  Only
1402   // applies to tail call optimized functions where the callee argument stack
1403   // size is bigger than the callers.
1404   if (TailCallReturnAddrDelta < 0) {
1405     BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
1406                          /*InEpilogue=*/false)
1407         .setMIFlag(MachineInstr::FrameSetup);
1408   }
1409 
1410   // Mapping for machine moves:
1411   //
1412   //   DST: VirtualFP AND
1413   //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
1414   //        ELSE                        => DW_CFA_def_cfa
1415   //
1416   //   SRC: VirtualFP AND
1417   //        DST: Register               => DW_CFA_def_cfa_register
1418   //
1419   //   ELSE
1420   //        OFFSET < 0                  => DW_CFA_offset_extended_sf
1421   //        REG < 64                    => DW_CFA_offset + Reg
1422   //        ELSE                        => DW_CFA_offset_extended
1423 
1424   uint64_t NumBytes = 0;
1425   int stackGrowth = -SlotSize;
1426 
1427   // Find the funclet establisher parameter
1428   Register Establisher = X86::NoRegister;
1429   if (IsClrFunclet)
1430     Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
1431   else if (IsFunclet)
1432     Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;
1433 
1434   if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
1435     // Immediately spill establisher into the home slot.
1436     // The runtime cares about this.
1437     // MOV64mr %rdx, 16(%rsp)
1438     unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1439     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
1440         .addReg(Establisher)
1441         .setMIFlag(MachineInstr::FrameSetup);
1442     MBB.addLiveIn(Establisher);
1443   }
1444 
1445   if (HasFP) {
1446     assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved");
1447 
1448     // Calculate required stack adjustment.
1449     uint64_t FrameSize = StackSize - SlotSize;
1450     // If required, include space for extra hidden slot for stashing base pointer.
1451     if (X86FI->getRestoreBasePointer())
1452       FrameSize += SlotSize;
1453 
1454     NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
1455 
1456     // Callee-saved registers are pushed on stack before the stack is realigned.
1457     if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
1458       NumBytes = alignTo(NumBytes, MaxAlign);
1459 
1460     // Save EBP/RBP into the appropriate stack slot.
1461     BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
1462       .addReg(MachineFramePtr, RegState::Kill)
1463       .setMIFlag(MachineInstr::FrameSetup);
1464 
1465     if (NeedsDwarfCFI) {
1466       // Mark the place where EBP/RBP was saved.
1467       // Define the current CFA rule to use the provided offset.
1468       assert(StackSize);
1469       BuildCFI(MBB, MBBI, DL,
1470                MCCFIInstruction::cfiDefCfaOffset(nullptr, -2 * stackGrowth));
1471 
1472       // Change the rule for the FramePtr to be an "offset" rule.
1473       unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1474       BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
1475                                   nullptr, DwarfFramePtr, 2 * stackGrowth));
1476     }
1477 
1478     if (NeedsWinCFI) {
1479       HasWinCFI = true;
1480       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1481           .addImm(FramePtr)
1482           .setMIFlag(MachineInstr::FrameSetup);
1483     }
1484 
1485     if (!IsFunclet) {
1486       if (X86FI->hasSwiftAsyncContext()) {
1487         const auto &Attrs = MF.getFunction().getAttributes();
1488 
1489         // Before we update the live frame pointer we have to ensure there's a
1490         // valid (or null) asynchronous context in its slot just before FP in
1491         // the frame record, so store it now.
1492         if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) {
1493           // We have an initial context in r14, store it just before the frame
1494           // pointer.
1495           MBB.addLiveIn(X86::R14);
1496           BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
1497               .addReg(X86::R14)
1498               .setMIFlag(MachineInstr::FrameSetup);
1499         } else {
1500           // No initial context, store null so that there's no pointer that
1501           // could be misused.
1502           BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64i8))
1503               .addImm(0)
1504               .setMIFlag(MachineInstr::FrameSetup);
1505         }
1506 
1507         if (NeedsWinCFI) {
1508           HasWinCFI = true;
1509           BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1510               .addImm(X86::R14)
1511               .setMIFlag(MachineInstr::FrameSetup);
1512         }
1513 
1514         BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr)
1515             .addUse(X86::RSP)
1516             .addImm(1)
1517             .addUse(X86::NoRegister)
1518             .addImm(8)
1519             .addUse(X86::NoRegister)
1520             .setMIFlag(MachineInstr::FrameSetup);
1521         BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64ri8), X86::RSP)
1522             .addUse(X86::RSP)
1523             .addImm(8)
1524             .setMIFlag(MachineInstr::FrameSetup);
1525       }
1526 
1527       if (!IsWin64Prologue && !IsFunclet) {
1528         // Update EBP with the new base value.
1529         if (!X86FI->hasSwiftAsyncContext())
1530           BuildMI(MBB, MBBI, DL,
1531                   TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
1532                   FramePtr)
1533               .addReg(StackPtr)
1534               .setMIFlag(MachineInstr::FrameSetup);
1535 
1536         if (NeedsDwarfCFI) {
1537           // Mark effective beginning of when frame pointer becomes valid.
1538           // Define the current CFA to use the EBP/RBP register.
1539           unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1540           BuildCFI(
1541               MBB, MBBI, DL,
1542               MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
1543         }
1544 
1545         if (NeedsWinFPO) {
1546           // .cv_fpo_setframe $FramePtr
1547           HasWinCFI = true;
1548           BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
1549               .addImm(FramePtr)
1550               .addImm(0)
1551               .setMIFlag(MachineInstr::FrameSetup);
1552         }
1553       }
1554     }
1555   } else {
1556     assert(!IsFunclet && "funclets without FPs not yet implemented");
1557     NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
1558   }
1559 
1560   // Update the offset adjustment, which is mainly used by codeview to translate
1561   // from ESP to VFRAME relative local variable offsets.
1562   if (!IsFunclet) {
1563     if (HasFP && TRI->hasStackRealignment(MF))
1564       MFI.setOffsetAdjustment(-NumBytes);
1565     else
1566       MFI.setOffsetAdjustment(-StackSize);
1567   }
1568 
1569   // For EH funclets, only allocate enough space for outgoing calls. Save the
1570   // NumBytes value that we would've used for the parent frame.
1571   unsigned ParentFrameNumBytes = NumBytes;
1572   if (IsFunclet)
1573     NumBytes = getWinEHFuncletFrameSize(MF);
1574 
1575   // Skip the callee-saved push instructions.
1576   bool PushedRegs = false;
1577   int StackOffset = 2 * stackGrowth;
1578 
1579   while (MBBI != MBB.end() &&
1580          MBBI->getFlag(MachineInstr::FrameSetup) &&
1581          (MBBI->getOpcode() == X86::PUSH32r ||
1582           MBBI->getOpcode() == X86::PUSH64r)) {
1583     PushedRegs = true;
1584     Register Reg = MBBI->getOperand(0).getReg();
1585     ++MBBI;
1586 
1587     if (!HasFP && NeedsDwarfCFI) {
1588       // Mark callee-saved push instruction.
1589       // Define the current CFA rule to use the provided offset.
1590       assert(StackSize);
1591       BuildCFI(MBB, MBBI, DL,
1592                MCCFIInstruction::cfiDefCfaOffset(nullptr, -StackOffset));
1593       StackOffset += stackGrowth;
1594     }
1595 
1596     if (NeedsWinCFI) {
1597       HasWinCFI = true;
1598       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1599           .addImm(Reg)
1600           .setMIFlag(MachineInstr::FrameSetup);
1601     }
1602   }
1603 
1604   // Realign stack after we pushed callee-saved registers (so that we'll be
1605   // able to calculate their offsets from the frame pointer).
1606   // Don't do this for Win64, it needs to realign the stack after the prologue.
1607   if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF)) {
1608     assert(HasFP && "There should be a frame pointer if stack is realigned.");
1609     BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
1610 
1611     if (NeedsWinCFI) {
1612       HasWinCFI = true;
1613       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlign))
1614           .addImm(MaxAlign)
1615           .setMIFlag(MachineInstr::FrameSetup);
1616     }
1617   }
1618 
1619   // If there is an SUB32ri of ESP immediately before this instruction, merge
1620   // the two. This can be the case when tail call elimination is enabled and
1621   // the callee has more arguments then the caller.
1622   NumBytes -= mergeSPUpdates(MBB, MBBI, true);
1623 
1624   // Adjust stack pointer: ESP -= numbytes.
1625 
1626   // Windows and cygwin/mingw require a prologue helper routine when allocating
1627   // more than 4K bytes on the stack.  Windows uses __chkstk and cygwin/mingw
1628   // uses __alloca.  __alloca and the 32-bit version of __chkstk will probe the
1629   // stack and adjust the stack pointer in one go.  The 64-bit version of
1630   // __chkstk is only responsible for probing the stack.  The 64-bit prologue is
1631   // responsible for adjusting the stack pointer.  Touching the stack at 4K
1632   // increments is necessary to ensure that the guard pages used by the OS
1633   // virtual memory manager are allocated in correct sequence.
1634   uint64_t AlignedNumBytes = NumBytes;
1635   if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF))
1636     AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
1637   if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {
1638     assert(!X86FI->getUsesRedZone() &&
1639            "The Red Zone is not accounted for in stack probes");
1640 
1641     // Check whether EAX is livein for this block.
1642     bool isEAXAlive = isEAXLiveIn(MBB);
1643 
1644     if (isEAXAlive) {
1645       if (Is64Bit) {
1646         // Save RAX
1647         BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
1648           .addReg(X86::RAX, RegState::Kill)
1649           .setMIFlag(MachineInstr::FrameSetup);
1650       } else {
1651         // Save EAX
1652         BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
1653           .addReg(X86::EAX, RegState::Kill)
1654           .setMIFlag(MachineInstr::FrameSetup);
1655       }
1656     }
1657 
1658     if (Is64Bit) {
1659       // Handle the 64-bit Windows ABI case where we need to call __chkstk.
1660       // Function prologue is responsible for adjusting the stack pointer.
1661       int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
1662       if (isUInt<32>(Alloc)) {
1663         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1664             .addImm(Alloc)
1665             .setMIFlag(MachineInstr::FrameSetup);
1666       } else if (isInt<32>(Alloc)) {
1667         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
1668             .addImm(Alloc)
1669             .setMIFlag(MachineInstr::FrameSetup);
1670       } else {
1671         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
1672             .addImm(Alloc)
1673             .setMIFlag(MachineInstr::FrameSetup);
1674       }
1675     } else {
1676       // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
1677       // We'll also use 4 already allocated bytes for EAX.
1678       BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1679           .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
1680           .setMIFlag(MachineInstr::FrameSetup);
1681     }
1682 
1683     // Call __chkstk, __chkstk_ms, or __alloca.
1684     emitStackProbe(MF, MBB, MBBI, DL, true);
1685 
1686     if (isEAXAlive) {
1687       // Restore RAX/EAX
1688       MachineInstr *MI;
1689       if (Is64Bit)
1690         MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV64rm), X86::RAX),
1691                           StackPtr, false, NumBytes - 8);
1692       else
1693         MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX),
1694                           StackPtr, false, NumBytes - 4);
1695       MI->setFlag(MachineInstr::FrameSetup);
1696       MBB.insert(MBBI, MI);
1697     }
1698   } else if (NumBytes) {
1699     emitSPUpdate(MBB, MBBI, DL, -(int64_t)NumBytes, /*InEpilogue=*/false);
1700   }
1701 
1702   if (NeedsWinCFI && NumBytes) {
1703     HasWinCFI = true;
1704     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
1705         .addImm(NumBytes)
1706         .setMIFlag(MachineInstr::FrameSetup);
1707   }
1708 
1709   int SEHFrameOffset = 0;
1710   unsigned SPOrEstablisher;
1711   if (IsFunclet) {
1712     if (IsClrFunclet) {
1713       // The establisher parameter passed to a CLR funclet is actually a pointer
1714       // to the (mostly empty) frame of its nearest enclosing funclet; we have
1715       // to find the root function establisher frame by loading the PSPSym from
1716       // the intermediate frame.
1717       unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
1718       MachinePointerInfo NoInfo;
1719       MBB.addLiveIn(Establisher);
1720       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher),
1721                    Establisher, false, PSPSlotOffset)
1722           .addMemOperand(MF.getMachineMemOperand(
1723               NoInfo, MachineMemOperand::MOLoad, SlotSize, Align(SlotSize)));
1724       ;
1725       // Save the root establisher back into the current funclet's (mostly
1726       // empty) frame, in case a sub-funclet or the GC needs it.
1727       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
1728                    false, PSPSlotOffset)
1729           .addReg(Establisher)
1730           .addMemOperand(MF.getMachineMemOperand(
1731               NoInfo,
1732               MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
1733               SlotSize, Align(SlotSize)));
1734     }
1735     SPOrEstablisher = Establisher;
1736   } else {
1737     SPOrEstablisher = StackPtr;
1738   }
1739 
1740   if (IsWin64Prologue && HasFP) {
1741     // Set RBP to a small fixed offset from RSP. In the funclet case, we base
1742     // this calculation on the incoming establisher, which holds the value of
1743     // RSP from the parent frame at the end of the prologue.
1744     SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
1745     if (SEHFrameOffset)
1746       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
1747                    SPOrEstablisher, false, SEHFrameOffset);
1748     else
1749       BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
1750           .addReg(SPOrEstablisher);
1751 
1752     // If this is not a funclet, emit the CFI describing our frame pointer.
1753     if (NeedsWinCFI && !IsFunclet) {
1754       assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
1755       HasWinCFI = true;
1756       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
1757           .addImm(FramePtr)
1758           .addImm(SEHFrameOffset)
1759           .setMIFlag(MachineInstr::FrameSetup);
1760       if (isAsynchronousEHPersonality(Personality))
1761         MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset;
1762     }
1763   } else if (IsFunclet && STI.is32Bit()) {
1764     // Reset EBP / ESI to something good for funclets.
1765     MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL);
1766     // If we're a catch funclet, we can be returned to via catchret. Save ESP
1767     // into the registration node so that the runtime will restore it for us.
1768     if (!MBB.isCleanupFuncletEntry()) {
1769       assert(Personality == EHPersonality::MSVC_CXX);
1770       Register FrameReg;
1771       int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex;
1772       int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg).getFixed();
1773       // ESP is the first field, so no extra displacement is needed.
1774       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg,
1775                    false, EHRegOffset)
1776           .addReg(X86::ESP);
1777     }
1778   }
1779 
1780   while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
1781     const MachineInstr &FrameInstr = *MBBI;
1782     ++MBBI;
1783 
1784     if (NeedsWinCFI) {
1785       int FI;
1786       if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
1787         if (X86::FR64RegClass.contains(Reg)) {
1788           int Offset;
1789           Register IgnoredFrameReg;
1790           if (IsWin64Prologue && IsFunclet)
1791             Offset = getWin64EHFrameIndexRef(MF, FI, IgnoredFrameReg);
1792           else
1793             Offset =
1794                 getFrameIndexReference(MF, FI, IgnoredFrameReg).getFixed() +
1795                 SEHFrameOffset;
1796 
1797           HasWinCFI = true;
1798           assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");
1799           BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
1800               .addImm(Reg)
1801               .addImm(Offset)
1802               .setMIFlag(MachineInstr::FrameSetup);
1803         }
1804       }
1805     }
1806   }
1807 
1808   if (NeedsWinCFI && HasWinCFI)
1809     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
1810         .setMIFlag(MachineInstr::FrameSetup);
1811 
1812   if (FnHasClrFunclet && !IsFunclet) {
1813     // Save the so-called Initial-SP (i.e. the value of the stack pointer
1814     // immediately after the prolog)  into the PSPSlot so that funclets
1815     // and the GC can recover it.
1816     unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
1817     auto PSPInfo = MachinePointerInfo::getFixedStack(
1818         MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx);
1819     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false,
1820                  PSPSlotOffset)
1821         .addReg(StackPtr)
1822         .addMemOperand(MF.getMachineMemOperand(
1823             PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
1824             SlotSize, Align(SlotSize)));
1825   }
1826 
1827   // Realign stack after we spilled callee-saved registers (so that we'll be
1828   // able to calculate their offsets from the frame pointer).
1829   // Win64 requires aligning the stack after the prologue.
1830   if (IsWin64Prologue && TRI->hasStackRealignment(MF)) {
1831     assert(HasFP && "There should be a frame pointer if stack is realigned.");
1832     BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
1833   }
1834 
1835   // We already dealt with stack realignment and funclets above.
1836   if (IsFunclet && STI.is32Bit())
1837     return;
1838 
1839   // If we need a base pointer, set it up here. It's whatever the value
1840   // of the stack pointer is at this point. Any variable size objects
1841   // will be allocated after this, so we can still use the base pointer
1842   // to reference locals.
1843   if (TRI->hasBasePointer(MF)) {
1844     // Update the base pointer with the current stack pointer.
1845     unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
1846     BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
1847       .addReg(SPOrEstablisher)
1848       .setMIFlag(MachineInstr::FrameSetup);
1849     if (X86FI->getRestoreBasePointer()) {
1850       // Stash value of base pointer.  Saving RSP instead of EBP shortens
1851       // dependence chain. Used by SjLj EH.
1852       unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1853       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
1854                    FramePtr, true, X86FI->getRestoreBasePointerOffset())
1855         .addReg(SPOrEstablisher)
1856         .setMIFlag(MachineInstr::FrameSetup);
1857     }
1858 
1859     if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
1860       // Stash the value of the frame pointer relative to the base pointer for
1861       // Win32 EH. This supports Win32 EH, which does the inverse of the above:
1862       // it recovers the frame pointer from the base pointer rather than the
1863       // other way around.
1864       unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1865       Register UsedReg;
1866       int Offset =
1867           getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg)
1868               .getFixed();
1869       assert(UsedReg == BasePtr);
1870       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
1871           .addReg(FramePtr)
1872           .setMIFlag(MachineInstr::FrameSetup);
1873     }
1874   }
1875 
1876   if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
1877     // Mark end of stack pointer adjustment.
1878     if (!HasFP && NumBytes) {
1879       // Define the current CFA rule to use the provided offset.
1880       assert(StackSize);
1881       BuildCFI(
1882           MBB, MBBI, DL,
1883           MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize - stackGrowth));
1884     }
1885 
1886     // Emit DWARF info specifying the offsets of the callee-saved registers.
1887     emitCalleeSavedFrameMoves(MBB, MBBI, DL, true);
1888   }
1889 
1890   // X86 Interrupt handling function cannot assume anything about the direction
1891   // flag (DF in EFLAGS register). Clear this flag by creating "cld" instruction
1892   // in each prologue of interrupt handler function.
1893   //
1894   // FIXME: Create "cld" instruction only in these cases:
1895   // 1. The interrupt handling function uses any of the "rep" instructions.
1896   // 2. Interrupt handling function calls another function.
1897   //
1898   if (Fn.getCallingConv() == CallingConv::X86_INTR)
1899     BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
1900         .setMIFlag(MachineInstr::FrameSetup);
1901 
1902   // At this point we know if the function has WinCFI or not.
1903   MF.setHasWinCFI(HasWinCFI);
1904 }
1905 
1906 bool X86FrameLowering::canUseLEAForSPInEpilogue(
1907     const MachineFunction &MF) const {
1908   // We can't use LEA instructions for adjusting the stack pointer if we don't
1909   // have a frame pointer in the Win64 ABI.  Only ADD instructions may be used
1910   // to deallocate the stack.
1911   // This means that we can use LEA for SP in two situations:
1912   // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
1913   // 2. We *have* a frame pointer which means we are permitted to use LEA.
1914   return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
1915 }
1916 
1917 static bool isFuncletReturnInstr(MachineInstr &MI) {
1918   switch (MI.getOpcode()) {
1919   case X86::CATCHRET:
1920   case X86::CLEANUPRET:
1921     return true;
1922   default:
1923     return false;
1924   }
1925   llvm_unreachable("impossible");
1926 }
1927 
1928 // CLR funclets use a special "Previous Stack Pointer Symbol" slot on the
1929 // stack. It holds a pointer to the bottom of the root function frame.  The
1930 // establisher frame pointer passed to a nested funclet may point to the
1931 // (mostly empty) frame of its parent funclet, but it will need to find
1932 // the frame of the root function to access locals.  To facilitate this,
1933 // every funclet copies the pointer to the bottom of the root function
1934 // frame into a PSPSym slot in its own (mostly empty) stack frame. Using the
1935 // same offset for the PSPSym in the root function frame that's used in the
1936 // funclets' frames allows each funclet to dynamically accept any ancestor
1937 // frame as its establisher argument (the runtime doesn't guarantee the
1938 // immediate parent for some reason lost to history), and also allows the GC,
1939 // which uses the PSPSym for some bookkeeping, to find it in any funclet's
1940 // frame with only a single offset reported for the entire method.
1941 unsigned
1942 X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
1943   const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
1944   Register SPReg;
1945   int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
1946                                               /*IgnoreSPUpdates*/ true)
1947                    .getFixed();
1948   assert(Offset >= 0 && SPReg == TRI->getStackRegister());
1949   return static_cast<unsigned>(Offset);
1950 }
1951 
1952 unsigned
1953 X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
1954   const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1955   // This is the size of the pushed CSRs.
1956   unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1957   // This is the size of callee saved XMMs.
1958   const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
1959   unsigned XMMSize = WinEHXMMSlotInfo.size() *
1960                      TRI->getSpillSize(X86::VR128RegClass);
1961   // This is the amount of stack a funclet needs to allocate.
1962   unsigned UsedSize;
1963   EHPersonality Personality =
1964       classifyEHPersonality(MF.getFunction().getPersonalityFn());
1965   if (Personality == EHPersonality::CoreCLR) {
1966     // CLR funclets need to hold enough space to include the PSPSym, at the
1967     // same offset from the stack pointer (immediately after the prolog) as it
1968     // resides at in the main function.
1969     UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
1970   } else {
1971     // Other funclets just need enough stack for outgoing call arguments.
1972     UsedSize = MF.getFrameInfo().getMaxCallFrameSize();
1973   }
1974   // RBP is not included in the callee saved register block. After pushing RBP,
1975   // everything is 16 byte aligned. Everything we allocate before an outgoing
1976   // call must also be 16 byte aligned.
1977   unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlign());
1978   // Subtract out the size of the callee saved registers. This is how much stack
1979   // each funclet will allocate.
1980   return FrameSizeMinusRBP + XMMSize - CSSize;
1981 }
1982 
1983 static bool isTailCallOpcode(unsigned Opc) {
1984     return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
1985         Opc == X86::TCRETURNmi ||
1986         Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 ||
1987         Opc == X86::TCRETURNmi64;
1988 }
1989 
1990 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
1991                                     MachineBasicBlock &MBB) const {
1992   const MachineFrameInfo &MFI = MF.getFrameInfo();
1993   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1994   MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator();
1995   MachineBasicBlock::iterator MBBI = Terminator;
1996   DebugLoc DL;
1997   if (MBBI != MBB.end())
1998     DL = MBBI->getDebugLoc();
1999   // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
2000   const bool Is64BitILP32 = STI.isTarget64BitILP32();
2001   Register FramePtr = TRI->getFrameRegister(MF);
2002   Register MachineFramePtr =
2003       Is64BitILP32 ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr;
2004 
2005   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
2006   bool NeedsWin64CFI =
2007       IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
2008   bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);
2009 
2010   // Get the number of bytes to allocate from the FrameInfo.
2011   uint64_t StackSize = MFI.getStackSize();
2012   uint64_t MaxAlign = calculateMaxStackAlign(MF);
2013   unsigned CSSize = X86FI->getCalleeSavedFrameSize();
2014   bool HasFP = hasFP(MF);
2015   uint64_t NumBytes = 0;
2016 
2017   bool NeedsDwarfCFI = (!MF.getTarget().getTargetTriple().isOSDarwin() &&
2018                         !MF.getTarget().getTargetTriple().isOSWindows()) &&
2019                        MF.needsFrameMoves();
2020 
2021   if (IsFunclet) {
2022     assert(HasFP && "EH funclets without FP not yet implemented");
2023     NumBytes = getWinEHFuncletFrameSize(MF);
2024   } else if (HasFP) {
2025     // Calculate required stack adjustment.
2026     uint64_t FrameSize = StackSize - SlotSize;
2027     NumBytes = FrameSize - CSSize;
2028 
2029     // Callee-saved registers were pushed on stack before the stack was
2030     // realigned.
2031     if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
2032       NumBytes = alignTo(FrameSize, MaxAlign);
2033   } else {
2034     NumBytes = StackSize - CSSize;
2035   }
2036   uint64_t SEHStackAllocAmt = NumBytes;
2037 
2038   // AfterPop is the position to insert .cfi_restore.
2039   MachineBasicBlock::iterator AfterPop = MBBI;
2040   if (HasFP) {
2041     if (X86FI->hasSwiftAsyncContext()) {
2042       // Discard the context.
2043       int Offset = 16 + mergeSPUpdates(MBB, MBBI, true);
2044       emitSPUpdate(MBB, MBBI, DL, Offset, /*InEpilogue*/true);
2045     }
2046     // Pop EBP.
2047     BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
2048             MachineFramePtr)
2049         .setMIFlag(MachineInstr::FrameDestroy);
2050 
2051     // We need to reset FP to its untagged state on return. Bit 60 is currently
2052     // used to show the presence of an extended frame.
2053     if (X86FI->hasSwiftAsyncContext()) {
2054       BuildMI(MBB, MBBI, DL, TII.get(X86::BTR64ri8),
2055               MachineFramePtr)
2056           .addUse(MachineFramePtr)
2057           .addImm(60)
2058           .setMIFlag(MachineInstr::FrameDestroy);
2059     }
2060 
2061     if (NeedsDwarfCFI) {
2062       unsigned DwarfStackPtr =
2063           TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
2064       BuildCFI(MBB, MBBI, DL,
2065                MCCFIInstruction::cfiDefCfa(nullptr, DwarfStackPtr, SlotSize));
2066       if (!MBB.succ_empty() && !MBB.isReturnBlock()) {
2067         unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
2068         BuildCFI(MBB, AfterPop, DL,
2069                  MCCFIInstruction::createRestore(nullptr, DwarfFramePtr));
2070         --MBBI;
2071         --AfterPop;
2072       }
2073       --MBBI;
2074     }
2075   }
2076 
2077   MachineBasicBlock::iterator FirstCSPop = MBBI;
2078   // Skip the callee-saved pop instructions.
2079   while (MBBI != MBB.begin()) {
2080     MachineBasicBlock::iterator PI = std::prev(MBBI);
2081     unsigned Opc = PI->getOpcode();
2082 
2083     if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
2084       if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
2085           (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
2086           (Opc != X86::BTR64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)) &&
2087           (Opc != X86::ADD64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)))
2088         break;
2089       FirstCSPop = PI;
2090     }
2091 
2092     --MBBI;
2093   }
2094   MBBI = FirstCSPop;
2095 
2096   if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
2097     emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);
2098 
2099   if (MBBI != MBB.end())
2100     DL = MBBI->getDebugLoc();
2101 
2102   // If there is an ADD32ri or SUB32ri of ESP immediately before this
2103   // instruction, merge the two instructions.
2104   if (NumBytes || MFI.hasVarSizedObjects())
2105     NumBytes += mergeSPUpdates(MBB, MBBI, true);
2106 
2107   // If dynamic alloca is used, then reset esp to point to the last callee-saved
2108   // slot before popping them off! Same applies for the case, when stack was
2109   // realigned. Don't do this if this was a funclet epilogue, since the funclets
2110   // will not do realignment or dynamic stack allocation.
2111   if (((TRI->hasStackRealignment(MF)) || MFI.hasVarSizedObjects()) &&
2112       !IsFunclet) {
2113     if (TRI->hasStackRealignment(MF))
2114       MBBI = FirstCSPop;
2115     unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
2116     uint64_t LEAAmount =
2117         IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
2118 
2119     if (X86FI->hasSwiftAsyncContext())
2120       LEAAmount -= 16;
2121 
2122     // There are only two legal forms of epilogue:
2123     // - add SEHAllocationSize, %rsp
2124     // - lea SEHAllocationSize(%FramePtr), %rsp
2125     //
2126     // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
2127     // However, we may use this sequence if we have a frame pointer because the
2128     // effects of the prologue can safely be undone.
2129     if (LEAAmount != 0) {
2130       unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
2131       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
2132                    FramePtr, false, LEAAmount);
2133       --MBBI;
2134     } else {
2135       unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
2136       BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
2137         .addReg(FramePtr);
2138       --MBBI;
2139     }
2140   } else if (NumBytes) {
2141     // Adjust stack pointer back: ESP += numbytes.
2142     emitSPUpdate(MBB, MBBI, DL, NumBytes, /*InEpilogue=*/true);
2143     if (!hasFP(MF) && NeedsDwarfCFI) {
2144       // Define the current CFA rule to use the provided offset.
2145       BuildCFI(MBB, MBBI, DL,
2146                MCCFIInstruction::cfiDefCfaOffset(nullptr, CSSize + SlotSize));
2147     }
2148     --MBBI;
2149   }
2150 
2151   // Windows unwinder will not invoke function's exception handler if IP is
2152   // either in prologue or in epilogue.  This behavior causes a problem when a
2153   // call immediately precedes an epilogue, because the return address points
2154   // into the epilogue.  To cope with that, we insert an epilogue marker here,
2155   // then replace it with a 'nop' if it ends up immediately after a CALL in the
2156   // final emitted code.
2157   if (NeedsWin64CFI && MF.hasWinCFI())
2158     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
2159 
2160   if (!hasFP(MF) && NeedsDwarfCFI) {
2161     MBBI = FirstCSPop;
2162     int64_t Offset = -CSSize - SlotSize;
2163     // Mark callee-saved pop instruction.
2164     // Define the current CFA rule to use the provided offset.
2165     while (MBBI != MBB.end()) {
2166       MachineBasicBlock::iterator PI = MBBI;
2167       unsigned Opc = PI->getOpcode();
2168       ++MBBI;
2169       if (Opc == X86::POP32r || Opc == X86::POP64r) {
2170         Offset += SlotSize;
2171         BuildCFI(MBB, MBBI, DL,
2172                  MCCFIInstruction::cfiDefCfaOffset(nullptr, -Offset));
2173       }
2174     }
2175   }
2176 
2177   // Emit DWARF info specifying the restores of the callee-saved registers.
2178   // For epilogue with return inside or being other block without successor,
2179   // no need to generate .cfi_restore for callee-saved registers.
2180   if (NeedsDwarfCFI && !MBB.succ_empty() && !MBB.isReturnBlock()) {
2181     emitCalleeSavedFrameMoves(MBB, AfterPop, DL, false);
2182   }
2183 
2184   if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) {
2185     // Add the return addr area delta back since we are not tail calling.
2186     int Offset = -1 * X86FI->getTCReturnAddrDelta();
2187     assert(Offset >= 0 && "TCDelta should never be positive");
2188     if (Offset) {
2189       // Check for possible merge with preceding ADD instruction.
2190       Offset += mergeSPUpdates(MBB, Terminator, true);
2191       emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true);
2192     }
2193   }
2194 
2195   // Emit tilerelease for AMX kernel.
2196   const MachineRegisterInfo &MRI = MF.getRegInfo();
2197   const TargetRegisterClass *RC = TRI->getRegClass(X86::TILERegClassID);
2198   for (unsigned I = 0; I < RC->getNumRegs(); I++)
2199     if (!MRI.reg_nodbg_empty(X86::TMM0 + I)) {
2200       BuildMI(MBB, Terminator, DL, TII.get(X86::TILERELEASE));
2201       break;
2202     }
2203 }
2204 
2205 StackOffset X86FrameLowering::getFrameIndexReference(const MachineFunction &MF,
2206                                                      int FI,
2207                                                      Register &FrameReg) const {
2208   const MachineFrameInfo &MFI = MF.getFrameInfo();
2209 
2210   bool IsFixed = MFI.isFixedObjectIndex(FI);
2211   // We can't calculate offset from frame pointer if the stack is realigned,
2212   // so enforce usage of stack/base pointer.  The base pointer is used when we
2213   // have dynamic allocas in addition to dynamic realignment.
2214   if (TRI->hasBasePointer(MF))
2215     FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
2216   else if (TRI->hasStackRealignment(MF))
2217     FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
2218   else
2219     FrameReg = TRI->getFrameRegister(MF);
2220 
2221   // Offset will hold the offset from the stack pointer at function entry to the
2222   // object.
2223   // We need to factor in additional offsets applied during the prologue to the
2224   // frame, base, and stack pointer depending on which is used.
2225   int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
2226   const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2227   unsigned CSSize = X86FI->getCalleeSavedFrameSize();
2228   uint64_t StackSize = MFI.getStackSize();
2229   bool HasFP = hasFP(MF);
2230   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
2231   int64_t FPDelta = 0;
2232 
2233   // In an x86 interrupt, remove the offset we added to account for the return
2234   // address from any stack object allocated in the caller's frame. Interrupts
2235   // do not have a standard return address. Fixed objects in the current frame,
2236   // such as SSE register spills, should not get this treatment.
2237   if (MF.getFunction().getCallingConv() == CallingConv::X86_INTR &&
2238       Offset >= 0) {
2239     Offset += getOffsetOfLocalArea();
2240   }
2241 
2242   if (IsWin64Prologue) {
2243     assert(!MFI.hasCalls() || (StackSize % 16) == 8);
2244 
2245     // Calculate required stack adjustment.
2246     uint64_t FrameSize = StackSize - SlotSize;
2247     // If required, include space for extra hidden slot for stashing base pointer.
2248     if (X86FI->getRestoreBasePointer())
2249       FrameSize += SlotSize;
2250     uint64_t NumBytes = FrameSize - CSSize;
2251 
2252     uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
2253     if (FI && FI == X86FI->getFAIndex())
2254       return StackOffset::getFixed(-SEHFrameOffset);
2255 
2256     // FPDelta is the offset from the "traditional" FP location of the old base
2257     // pointer followed by return address and the location required by the
2258     // restricted Win64 prologue.
2259     // Add FPDelta to all offsets below that go through the frame pointer.
2260     FPDelta = FrameSize - SEHFrameOffset;
2261     assert((!MFI.hasCalls() || (FPDelta % 16) == 0) &&
2262            "FPDelta isn't aligned per the Win64 ABI!");
2263   }
2264 
2265 
2266   if (TRI->hasBasePointer(MF)) {
2267     assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
2268     if (FI < 0) {
2269       // Skip the saved EBP.
2270       return StackOffset::getFixed(Offset + SlotSize + FPDelta);
2271     } else {
2272       assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize)));
2273       return StackOffset::getFixed(Offset + StackSize);
2274     }
2275   } else if (TRI->hasStackRealignment(MF)) {
2276     if (FI < 0) {
2277       // Skip the saved EBP.
2278       return StackOffset::getFixed(Offset + SlotSize + FPDelta);
2279     } else {
2280       assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize)));
2281       return StackOffset::getFixed(Offset + StackSize);
2282     }
2283     // FIXME: Support tail calls
2284   } else {
2285     if (!HasFP)
2286       return StackOffset::getFixed(Offset + StackSize);
2287 
2288     // Skip the saved EBP.
2289     Offset += SlotSize;
2290 
2291     // Skip the RETADDR move area
2292     int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
2293     if (TailCallReturnAddrDelta < 0)
2294       Offset -= TailCallReturnAddrDelta;
2295   }
2296 
2297   return StackOffset::getFixed(Offset + FPDelta);
2298 }
2299 
2300 int X86FrameLowering::getWin64EHFrameIndexRef(const MachineFunction &MF, int FI,
2301                                               Register &FrameReg) const {
2302   const MachineFrameInfo &MFI = MF.getFrameInfo();
2303   const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2304   const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
2305   const auto it = WinEHXMMSlotInfo.find(FI);
2306 
2307   if (it == WinEHXMMSlotInfo.end())
2308     return getFrameIndexReference(MF, FI, FrameReg).getFixed();
2309 
2310   FrameReg = TRI->getStackRegister();
2311   return alignDown(MFI.getMaxCallFrameSize(), getStackAlign().value()) +
2312          it->second;
2313 }
2314 
2315 StackOffset
2316 X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF, int FI,
2317                                            Register &FrameReg,
2318                                            int Adjustment) const {
2319   const MachineFrameInfo &MFI = MF.getFrameInfo();
2320   FrameReg = TRI->getStackRegister();
2321   return StackOffset::getFixed(MFI.getObjectOffset(FI) -
2322                                getOffsetOfLocalArea() + Adjustment);
2323 }
2324 
2325 StackOffset
2326 X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
2327                                                  int FI, Register &FrameReg,
2328                                                  bool IgnoreSPUpdates) const {
2329 
2330   const MachineFrameInfo &MFI = MF.getFrameInfo();
2331   // Does not include any dynamic realign.
2332   const uint64_t StackSize = MFI.getStackSize();
2333   // LLVM arranges the stack as follows:
2334   //   ...
2335   //   ARG2
2336   //   ARG1
2337   //   RETADDR
2338   //   PUSH RBP   <-- RBP points here
2339   //   PUSH CSRs
2340   //   ~~~~~~~    <-- possible stack realignment (non-win64)
2341   //   ...
2342   //   STACK OBJECTS
2343   //   ...        <-- RSP after prologue points here
2344   //   ~~~~~~~    <-- possible stack realignment (win64)
2345   //
2346   // if (hasVarSizedObjects()):
2347   //   ...        <-- "base pointer" (ESI/RBX) points here
2348   //   DYNAMIC ALLOCAS
2349   //   ...        <-- RSP points here
2350   //
2351   // Case 1: In the simple case of no stack realignment and no dynamic
2352   // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
2353   // with fixed offsets from RSP.
2354   //
2355   // Case 2: In the case of stack realignment with no dynamic allocas, fixed
2356   // stack objects are addressed with RBP and regular stack objects with RSP.
2357   //
2358   // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
2359   // to address stack arguments for outgoing calls and nothing else. The "base
2360   // pointer" points to local variables, and RBP points to fixed objects.
2361   //
2362   // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
2363   // answer we give is relative to the SP after the prologue, and not the
2364   // SP in the middle of the function.
2365 
2366   if (MFI.isFixedObjectIndex(FI) && TRI->hasStackRealignment(MF) &&
2367       !STI.isTargetWin64())
2368     return getFrameIndexReference(MF, FI, FrameReg);
2369 
2370   // If !hasReservedCallFrame the function might have SP adjustement in the
2371   // body.  So, even though the offset is statically known, it depends on where
2372   // we are in the function.
2373   if (!IgnoreSPUpdates && !hasReservedCallFrame(MF))
2374     return getFrameIndexReference(MF, FI, FrameReg);
2375 
2376   // We don't handle tail calls, and shouldn't be seeing them either.
2377   assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
2378          "we don't handle this case!");
2379 
2380   // This is how the math works out:
2381   //
2382   //  %rsp grows (i.e. gets lower) left to right. Each box below is
2383   //  one word (eight bytes).  Obj0 is the stack slot we're trying to
2384   //  get to.
2385   //
2386   //    ----------------------------------
2387   //    | BP | Obj0 | Obj1 | ... | ObjN |
2388   //    ----------------------------------
2389   //    ^    ^      ^                   ^
2390   //    A    B      C                   E
2391   //
2392   // A is the incoming stack pointer.
2393   // (B - A) is the local area offset (-8 for x86-64) [1]
2394   // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
2395   //
2396   // |(E - B)| is the StackSize (absolute value, positive).  For a
2397   // stack that grown down, this works out to be (B - E). [3]
2398   //
2399   // E is also the value of %rsp after stack has been set up, and we
2400   // want (C - E) -- the value we can add to %rsp to get to Obj0.  Now
2401   // (C - E) == (C - A) - (B - A) + (B - E)
2402   //            { Using [1], [2] and [3] above }
2403   //         == getObjectOffset - LocalAreaOffset + StackSize
2404 
2405   return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize);
2406 }
2407 
2408 bool X86FrameLowering::assignCalleeSavedSpillSlots(
2409     MachineFunction &MF, const TargetRegisterInfo *TRI,
2410     std::vector<CalleeSavedInfo> &CSI) const {
2411   MachineFrameInfo &MFI = MF.getFrameInfo();
2412   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2413 
2414   unsigned CalleeSavedFrameSize = 0;
2415   unsigned XMMCalleeSavedFrameSize = 0;
2416   auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
2417   int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
2418 
2419   int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
2420 
2421   if (TailCallReturnAddrDelta < 0) {
2422     // create RETURNADDR area
2423     //   arg
2424     //   arg
2425     //   RETADDR
2426     //   { ...
2427     //     RETADDR area
2428     //     ...
2429     //   }
2430     //   [EBP]
2431     MFI.CreateFixedObject(-TailCallReturnAddrDelta,
2432                            TailCallReturnAddrDelta - SlotSize, true);
2433   }
2434 
2435   // Spill the BasePtr if it's used.
2436   if (this->TRI->hasBasePointer(MF)) {
2437     // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
2438     if (MF.hasEHFunclets()) {
2439       int FI = MFI.CreateSpillStackObject(SlotSize, Align(SlotSize));
2440       X86FI->setHasSEHFramePtrSave(true);
2441       X86FI->setSEHFramePtrSaveIndex(FI);
2442     }
2443   }
2444 
2445   if (hasFP(MF)) {
2446     // emitPrologue always spills frame register the first thing.
2447     SpillSlotOffset -= SlotSize;
2448     MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2449 
2450     // The async context lives directly before the frame pointer, and we
2451     // allocate a second slot to preserve stack alignment.
2452     if (X86FI->hasSwiftAsyncContext()) {
2453       SpillSlotOffset -= SlotSize;
2454       MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2455       SpillSlotOffset -= SlotSize;
2456     }
2457 
2458     // Since emitPrologue and emitEpilogue will handle spilling and restoring of
2459     // the frame register, we can delete it from CSI list and not have to worry
2460     // about avoiding it later.
2461     Register FPReg = TRI->getFrameRegister(MF);
2462     for (unsigned i = 0; i < CSI.size(); ++i) {
2463       if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
2464         CSI.erase(CSI.begin() + i);
2465         break;
2466       }
2467     }
2468   }
2469 
2470   // Assign slots for GPRs. It increases frame size.
2471   for (unsigned i = CSI.size(); i != 0; --i) {
2472     unsigned Reg = CSI[i - 1].getReg();
2473 
2474     if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2475       continue;
2476 
2477     SpillSlotOffset -= SlotSize;
2478     CalleeSavedFrameSize += SlotSize;
2479 
2480     int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2481     CSI[i - 1].setFrameIdx(SlotIndex);
2482   }
2483 
2484   X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
2485   MFI.setCVBytesOfCalleeSavedRegisters(CalleeSavedFrameSize);
2486 
2487   // Assign slots for XMMs.
2488   for (unsigned i = CSI.size(); i != 0; --i) {
2489     unsigned Reg = CSI[i - 1].getReg();
2490     if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2491       continue;
2492 
2493     // If this is k-register make sure we lookup via the largest legal type.
2494     MVT VT = MVT::Other;
2495     if (X86::VK16RegClass.contains(Reg))
2496       VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2497 
2498     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2499     unsigned Size = TRI->getSpillSize(*RC);
2500     Align Alignment = TRI->getSpillAlign(*RC);
2501     // ensure alignment
2502     assert(SpillSlotOffset < 0 && "SpillSlotOffset should always < 0 on X86");
2503     SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment);
2504 
2505     // spill into slot
2506     SpillSlotOffset -= Size;
2507     int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
2508     CSI[i - 1].setFrameIdx(SlotIndex);
2509     MFI.ensureMaxAlignment(Alignment);
2510 
2511     // Save the start offset and size of XMM in stack frame for funclets.
2512     if (X86::VR128RegClass.contains(Reg)) {
2513       WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
2514       XMMCalleeSavedFrameSize += Size;
2515     }
2516   }
2517 
2518   return true;
2519 }
2520 
2521 bool X86FrameLowering::spillCalleeSavedRegisters(
2522     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2523     ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2524   DebugLoc DL = MBB.findDebugLoc(MI);
2525 
2526   // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
2527   // for us, and there are no XMM CSRs on Win32.
2528   if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
2529     return true;
2530 
2531   // Push GPRs. It increases frame size.
2532   const MachineFunction &MF = *MBB.getParent();
2533   unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
2534   for (unsigned i = CSI.size(); i != 0; --i) {
2535     unsigned Reg = CSI[i - 1].getReg();
2536 
2537     if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2538       continue;
2539 
2540     const MachineRegisterInfo &MRI = MF.getRegInfo();
2541     bool isLiveIn = MRI.isLiveIn(Reg);
2542     if (!isLiveIn)
2543       MBB.addLiveIn(Reg);
2544 
2545     // Decide whether we can add a kill flag to the use.
2546     bool CanKill = !isLiveIn;
2547     // Check if any subregister is live-in
2548     if (CanKill) {
2549       for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
2550         if (MRI.isLiveIn(*AReg)) {
2551           CanKill = false;
2552           break;
2553         }
2554       }
2555     }
2556 
2557     // Do not set a kill flag on values that are also marked as live-in. This
2558     // happens with the @llvm-returnaddress intrinsic and with arguments
2559     // passed in callee saved registers.
2560     // Omitting the kill flags is conservatively correct even if the live-in
2561     // is not used after all.
2562     BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
2563       .setMIFlag(MachineInstr::FrameSetup);
2564   }
2565 
2566   // Make XMM regs spilled. X86 does not have ability of push/pop XMM.
2567   // It can be done by spilling XMMs to stack frame.
2568   for (unsigned i = CSI.size(); i != 0; --i) {
2569     unsigned Reg = CSI[i-1].getReg();
2570     if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2571       continue;
2572 
2573     // If this is k-register make sure we lookup via the largest legal type.
2574     MVT VT = MVT::Other;
2575     if (X86::VK16RegClass.contains(Reg))
2576       VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2577 
2578     // Add the callee-saved register as live-in. It's killed at the spill.
2579     MBB.addLiveIn(Reg);
2580     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2581 
2582     TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
2583                             TRI);
2584     --MI;
2585     MI->setFlag(MachineInstr::FrameSetup);
2586     ++MI;
2587   }
2588 
2589   return true;
2590 }
2591 
2592 void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB,
2593                                                MachineBasicBlock::iterator MBBI,
2594                                                MachineInstr *CatchRet) const {
2595   // SEH shouldn't use catchret.
2596   assert(!isAsynchronousEHPersonality(classifyEHPersonality(
2597              MBB.getParent()->getFunction().getPersonalityFn())) &&
2598          "SEH should not use CATCHRET");
2599   const DebugLoc &DL = CatchRet->getDebugLoc();
2600   MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();
2601 
2602   // Fill EAX/RAX with the address of the target block.
2603   if (STI.is64Bit()) {
2604     // LEA64r CatchRetTarget(%rip), %rax
2605     BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX)
2606         .addReg(X86::RIP)
2607         .addImm(0)
2608         .addReg(0)
2609         .addMBB(CatchRetTarget)
2610         .addReg(0);
2611   } else {
2612     // MOV32ri $CatchRetTarget, %eax
2613     BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
2614         .addMBB(CatchRetTarget);
2615   }
2616 
2617   // Record that we've taken the address of CatchRetTarget and no longer just
2618   // reference it in a terminator.
2619   CatchRetTarget->setHasAddressTaken();
2620 }
2621 
2622 bool X86FrameLowering::restoreCalleeSavedRegisters(
2623     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2624     MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2625   if (CSI.empty())
2626     return false;
2627 
2628   if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
2629     // Don't restore CSRs in 32-bit EH funclets. Matches
2630     // spillCalleeSavedRegisters.
2631     if (STI.is32Bit())
2632       return true;
2633     // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
2634     // funclets. emitEpilogue transforms these to normal jumps.
2635     if (MI->getOpcode() == X86::CATCHRET) {
2636       const Function &F = MBB.getParent()->getFunction();
2637       bool IsSEH = isAsynchronousEHPersonality(
2638           classifyEHPersonality(F.getPersonalityFn()));
2639       if (IsSEH)
2640         return true;
2641     }
2642   }
2643 
2644   DebugLoc DL = MBB.findDebugLoc(MI);
2645 
2646   // Reload XMMs from stack frame.
2647   for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
2648     unsigned Reg = CSI[i].getReg();
2649     if (X86::GR64RegClass.contains(Reg) ||
2650         X86::GR32RegClass.contains(Reg))
2651       continue;
2652 
2653     // If this is k-register make sure we lookup via the largest legal type.
2654     MVT VT = MVT::Other;
2655     if (X86::VK16RegClass.contains(Reg))
2656       VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2657 
2658     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2659     TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
2660   }
2661 
2662   // POP GPRs.
2663   unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
2664   for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
2665     unsigned Reg = CSI[i].getReg();
2666     if (!X86::GR64RegClass.contains(Reg) &&
2667         !X86::GR32RegClass.contains(Reg))
2668       continue;
2669 
2670     BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
2671         .setMIFlag(MachineInstr::FrameDestroy);
2672   }
2673   return true;
2674 }
2675 
2676 void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
2677                                             BitVector &SavedRegs,
2678                                             RegScavenger *RS) const {
2679   TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
2680 
2681   // Spill the BasePtr if it's used.
2682   if (TRI->hasBasePointer(MF)){
2683     Register BasePtr = TRI->getBaseRegister();
2684     if (STI.isTarget64BitILP32())
2685       BasePtr = getX86SubSuperRegister(BasePtr, 64);
2686     SavedRegs.set(BasePtr);
2687   }
2688 }
2689 
2690 static bool
2691 HasNestArgument(const MachineFunction *MF) {
2692   const Function &F = MF->getFunction();
2693   for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
2694        I != E; I++) {
2695     if (I->hasNestAttr() && !I->use_empty())
2696       return true;
2697   }
2698   return false;
2699 }
2700 
2701 /// GetScratchRegister - Get a temp register for performing work in the
2702 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
2703 /// and the properties of the function either one or two registers will be
2704 /// needed. Set primary to true for the first register, false for the second.
2705 static unsigned
2706 GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
2707   CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();
2708 
2709   // Erlang stuff.
2710   if (CallingConvention == CallingConv::HiPE) {
2711     if (Is64Bit)
2712       return Primary ? X86::R14 : X86::R13;
2713     else
2714       return Primary ? X86::EBX : X86::EDI;
2715   }
2716 
2717   if (Is64Bit) {
2718     if (IsLP64)
2719       return Primary ? X86::R11 : X86::R12;
2720     else
2721       return Primary ? X86::R11D : X86::R12D;
2722   }
2723 
2724   bool IsNested = HasNestArgument(&MF);
2725 
2726   if (CallingConvention == CallingConv::X86_FastCall ||
2727       CallingConvention == CallingConv::Fast ||
2728       CallingConvention == CallingConv::Tail) {
2729     if (IsNested)
2730       report_fatal_error("Segmented stacks does not support fastcall with "
2731                          "nested function.");
2732     return Primary ? X86::EAX : X86::ECX;
2733   }
2734   if (IsNested)
2735     return Primary ? X86::EDX : X86::EAX;
2736   return Primary ? X86::ECX : X86::EAX;
2737 }
2738 
2739 // The stack limit in the TCB is set to this many bytes above the actual stack
2740 // limit.
2741 static const uint64_t kSplitStackAvailable = 256;
2742 
2743 void X86FrameLowering::adjustForSegmentedStacks(
2744     MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
2745   MachineFrameInfo &MFI = MF.getFrameInfo();
2746   uint64_t StackSize;
2747   unsigned TlsReg, TlsOffset;
2748   DebugLoc DL;
2749 
2750   // To support shrink-wrapping we would need to insert the new blocks
2751   // at the right place and update the branches to PrologueMBB.
2752   assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
2753 
2754   unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2755   assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
2756          "Scratch register is live-in");
2757 
2758   if (MF.getFunction().isVarArg())
2759     report_fatal_error("Segmented stacks do not support vararg functions.");
2760   if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
2761       !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
2762       !STI.isTargetDragonFly())
2763     report_fatal_error("Segmented stacks not supported on this platform.");
2764 
2765   // Eventually StackSize will be calculated by a link-time pass; which will
2766   // also decide whether checking code needs to be injected into this particular
2767   // prologue.
2768   StackSize = MFI.getStackSize();
2769 
2770   // Do not generate a prologue for leaf functions with a stack of size zero.
2771   // For non-leaf functions we have to allow for the possibility that the
2772   // callis to a non-split function, as in PR37807. This function could also
2773   // take the address of a non-split function. When the linker tries to adjust
2774   // its non-existent prologue, it would fail with an error. Mark the object
2775   // file so that such failures are not errors. See this Go language bug-report
2776   // https://go-review.googlesource.com/c/go/+/148819/
2777   if (StackSize == 0 && !MFI.hasTailCall()) {
2778     MF.getMMI().setHasNosplitStack(true);
2779     return;
2780   }
2781 
2782   MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
2783   MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
2784   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2785   bool IsNested = false;
2786 
2787   // We need to know if the function has a nest argument only in 64 bit mode.
2788   if (Is64Bit)
2789     IsNested = HasNestArgument(&MF);
2790 
2791   // The MOV R10, RAX needs to be in a different block, since the RET we emit in
2792   // allocMBB needs to be last (terminating) instruction.
2793 
2794   for (const auto &LI : PrologueMBB.liveins()) {
2795     allocMBB->addLiveIn(LI);
2796     checkMBB->addLiveIn(LI);
2797   }
2798 
2799   if (IsNested)
2800     allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
2801 
2802   MF.push_front(allocMBB);
2803   MF.push_front(checkMBB);
2804 
2805   // When the frame size is less than 256 we just compare the stack
2806   // boundary directly to the value of the stack pointer, per gcc.
2807   bool CompareStackPointer = StackSize < kSplitStackAvailable;
2808 
2809   // Read the limit off the current stacklet off the stack_guard location.
2810   if (Is64Bit) {
2811     if (STI.isTargetLinux()) {
2812       TlsReg = X86::FS;
2813       TlsOffset = IsLP64 ? 0x70 : 0x40;
2814     } else if (STI.isTargetDarwin()) {
2815       TlsReg = X86::GS;
2816       TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
2817     } else if (STI.isTargetWin64()) {
2818       TlsReg = X86::GS;
2819       TlsOffset = 0x28; // pvArbitrary, reserved for application use
2820     } else if (STI.isTargetFreeBSD()) {
2821       TlsReg = X86::FS;
2822       TlsOffset = 0x18;
2823     } else if (STI.isTargetDragonFly()) {
2824       TlsReg = X86::FS;
2825       TlsOffset = 0x20; // use tls_tcb.tcb_segstack
2826     } else {
2827       report_fatal_error("Segmented stacks not supported on this platform.");
2828     }
2829 
2830     if (CompareStackPointer)
2831       ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
2832     else
2833       BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
2834         .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
2835 
2836     BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
2837       .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
2838   } else {
2839     if (STI.isTargetLinux()) {
2840       TlsReg = X86::GS;
2841       TlsOffset = 0x30;
2842     } else if (STI.isTargetDarwin()) {
2843       TlsReg = X86::GS;
2844       TlsOffset = 0x48 + 90*4;
2845     } else if (STI.isTargetWin32()) {
2846       TlsReg = X86::FS;
2847       TlsOffset = 0x14; // pvArbitrary, reserved for application use
2848     } else if (STI.isTargetDragonFly()) {
2849       TlsReg = X86::FS;
2850       TlsOffset = 0x10; // use tls_tcb.tcb_segstack
2851     } else if (STI.isTargetFreeBSD()) {
2852       report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
2853     } else {
2854       report_fatal_error("Segmented stacks not supported on this platform.");
2855     }
2856 
2857     if (CompareStackPointer)
2858       ScratchReg = X86::ESP;
2859     else
2860       BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
2861         .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
2862 
2863     if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
2864         STI.isTargetDragonFly()) {
2865       BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
2866         .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
2867     } else if (STI.isTargetDarwin()) {
2868 
2869       // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
2870       unsigned ScratchReg2;
2871       bool SaveScratch2;
2872       if (CompareStackPointer) {
2873         // The primary scratch register is available for holding the TLS offset.
2874         ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2875         SaveScratch2 = false;
2876       } else {
2877         // Need to use a second register to hold the TLS offset
2878         ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);
2879 
2880         // Unfortunately, with fastcc the second scratch register may hold an
2881         // argument.
2882         SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
2883       }
2884 
2885       // If Scratch2 is live-in then it needs to be saved.
2886       assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
2887              "Scratch register is live-in and not saved");
2888 
2889       if (SaveScratch2)
2890         BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
2891           .addReg(ScratchReg2, RegState::Kill);
2892 
2893       BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
2894         .addImm(TlsOffset);
2895       BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
2896         .addReg(ScratchReg)
2897         .addReg(ScratchReg2).addImm(1).addReg(0)
2898         .addImm(0)
2899         .addReg(TlsReg);
2900 
2901       if (SaveScratch2)
2902         BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
2903     }
2904   }
2905 
2906   // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
2907   // It jumps to normal execution of the function body.
2908   BuildMI(checkMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_A);
2909 
2910   // On 32 bit we first push the arguments size and then the frame size. On 64
2911   // bit, we pass the stack frame size in r10 and the argument size in r11.
2912   if (Is64Bit) {
2913     // Functions with nested arguments use R10, so it needs to be saved across
2914     // the call to _morestack
2915 
2916     const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
2917     const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
2918     const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
2919     const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
2920     const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
2921 
2922     if (IsNested)
2923       BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
2924 
2925     BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
2926       .addImm(StackSize);
2927     BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
2928       .addImm(X86FI->getArgumentStackSize());
2929   } else {
2930     BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2931       .addImm(X86FI->getArgumentStackSize());
2932     BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2933       .addImm(StackSize);
2934   }
2935 
2936   // __morestack is in libgcc
2937   if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
2938     // Under the large code model, we cannot assume that __morestack lives
2939     // within 2^31 bytes of the call site, so we cannot use pc-relative
2940     // addressing. We cannot perform the call via a temporary register,
2941     // as the rax register may be used to store the static chain, and all
2942     // other suitable registers may be either callee-save or used for
2943     // parameter passing. We cannot use the stack at this point either
2944     // because __morestack manipulates the stack directly.
2945     //
2946     // To avoid these issues, perform an indirect call via a read-only memory
2947     // location containing the address.
2948     //
2949     // This solution is not perfect, as it assumes that the .rodata section
2950     // is laid out within 2^31 bytes of each function body, but this seems
2951     // to be sufficient for JIT.
2952     // FIXME: Add retpoline support and remove the error here..
2953     if (STI.useIndirectThunkCalls())
2954       report_fatal_error("Emitting morestack calls on 64-bit with the large "
2955                          "code model and thunks not yet implemented.");
2956     BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
2957         .addReg(X86::RIP)
2958         .addImm(0)
2959         .addReg(0)
2960         .addExternalSymbol("__morestack_addr")
2961         .addReg(0);
2962     MF.getMMI().setUsesMorestackAddr(true);
2963   } else {
2964     if (Is64Bit)
2965       BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
2966         .addExternalSymbol("__morestack");
2967     else
2968       BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
2969         .addExternalSymbol("__morestack");
2970   }
2971 
2972   if (IsNested)
2973     BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
2974   else
2975     BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
2976 
2977   allocMBB->addSuccessor(&PrologueMBB);
2978 
2979   checkMBB->addSuccessor(allocMBB, BranchProbability::getZero());
2980   checkMBB->addSuccessor(&PrologueMBB, BranchProbability::getOne());
2981 
2982 #ifdef EXPENSIVE_CHECKS
2983   MF.verify();
2984 #endif
2985 }
2986 
2987 /// Lookup an ERTS parameter in the !hipe.literals named metadata node.
2988 /// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
2989 /// to fields it needs, through a named metadata node "hipe.literals" containing
2990 /// name-value pairs.
2991 static unsigned getHiPELiteral(
2992     NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) {
2993   for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
2994     MDNode *Node = HiPELiteralsMD->getOperand(i);
2995     if (Node->getNumOperands() != 2) continue;
2996     MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
2997     ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
2998     if (!NodeName || !NodeVal) continue;
2999     ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
3000     if (ValConst && NodeName->getString() == LiteralName) {
3001       return ValConst->getZExtValue();
3002     }
3003   }
3004 
3005   report_fatal_error("HiPE literal " + LiteralName
3006                      + " required but not provided");
3007 }
3008 
3009 // Return true if there are no non-ehpad successors to MBB and there are no
3010 // non-meta instructions between MBBI and MBB.end().
3011 static bool blockEndIsUnreachable(const MachineBasicBlock &MBB,
3012                                   MachineBasicBlock::const_iterator MBBI) {
3013   return llvm::all_of(
3014              MBB.successors(),
3015              [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) &&
3016          std::all_of(MBBI, MBB.end(), [](const MachineInstr &MI) {
3017            return MI.isMetaInstruction();
3018          });
3019 }
3020 
3021 /// Erlang programs may need a special prologue to handle the stack size they
3022 /// might need at runtime. That is because Erlang/OTP does not implement a C
3023 /// stack but uses a custom implementation of hybrid stack/heap architecture.
3024 /// (for more information see Eric Stenman's Ph.D. thesis:
3025 /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
3026 ///
3027 /// CheckStack:
3028 ///       temp0 = sp - MaxStack
3029 ///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
3030 /// OldStart:
3031 ///       ...
3032 /// IncStack:
3033 ///       call inc_stack   # doubles the stack space
3034 ///       temp0 = sp - MaxStack
3035 ///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
3036 void X86FrameLowering::adjustForHiPEPrologue(
3037     MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
3038   MachineFrameInfo &MFI = MF.getFrameInfo();
3039   DebugLoc DL;
3040 
3041   // To support shrink-wrapping we would need to insert the new blocks
3042   // at the right place and update the branches to PrologueMBB.
3043   assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
3044 
3045   // HiPE-specific values
3046   NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule()
3047     ->getNamedMetadata("hipe.literals");
3048   if (!HiPELiteralsMD)
3049     report_fatal_error(
3050         "Can't generate HiPE prologue without runtime parameters");
3051   const unsigned HipeLeafWords
3052     = getHiPELiteral(HiPELiteralsMD,
3053                      Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
3054   const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
3055   const unsigned Guaranteed = HipeLeafWords * SlotSize;
3056   unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ?
3057                             MF.getFunction().arg_size() - CCRegisteredArgs : 0;
3058   unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize;
3059 
3060   assert(STI.isTargetLinux() &&
3061          "HiPE prologue is only supported on Linux operating systems.");
3062 
3063   // Compute the largest caller's frame that is needed to fit the callees'
3064   // frames. This 'MaxStack' is computed from:
3065   //
3066   // a) the fixed frame size, which is the space needed for all spilled temps,
3067   // b) outgoing on-stack parameter areas, and
3068   // c) the minimum stack space this function needs to make available for the
3069   //    functions it calls (a tunable ABI property).
3070   if (MFI.hasCalls()) {
3071     unsigned MoreStackForCalls = 0;
3072 
3073     for (auto &MBB : MF) {
3074       for (auto &MI : MBB) {
3075         if (!MI.isCall())
3076           continue;
3077 
3078         // Get callee operand.
3079         const MachineOperand &MO = MI.getOperand(0);
3080 
3081         // Only take account of global function calls (no closures etc.).
3082         if (!MO.isGlobal())
3083           continue;
3084 
3085         const Function *F = dyn_cast<Function>(MO.getGlobal());
3086         if (!F)
3087           continue;
3088 
3089         // Do not update 'MaxStack' for primitive and built-in functions
3090         // (encoded with names either starting with "erlang."/"bif_" or not
3091         // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
3092         // "_", such as the BIF "suspend_0") as they are executed on another
3093         // stack.
3094         if (F->getName().find("erlang.") != StringRef::npos ||
3095             F->getName().find("bif_") != StringRef::npos ||
3096             F->getName().find_first_of("._") == StringRef::npos)
3097           continue;
3098 
3099         unsigned CalleeStkArity =
3100           F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
3101         if (HipeLeafWords - 1 > CalleeStkArity)
3102           MoreStackForCalls = std::max(MoreStackForCalls,
3103                                (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
3104       }
3105     }
3106     MaxStack += MoreStackForCalls;
3107   }
3108 
3109   // If the stack frame needed is larger than the guaranteed then runtime checks
3110   // and calls to "inc_stack_0" BIF should be inserted in the assembly prologue.
3111   if (MaxStack > Guaranteed) {
3112     MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
3113     MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
3114 
3115     for (const auto &LI : PrologueMBB.liveins()) {
3116       stackCheckMBB->addLiveIn(LI);
3117       incStackMBB->addLiveIn(LI);
3118     }
3119 
3120     MF.push_front(incStackMBB);
3121     MF.push_front(stackCheckMBB);
3122 
3123     unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
3124     unsigned LEAop, CMPop, CALLop;
3125     SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
3126     if (Is64Bit) {
3127       SPReg = X86::RSP;
3128       PReg  = X86::RBP;
3129       LEAop = X86::LEA64r;
3130       CMPop = X86::CMP64rm;
3131       CALLop = X86::CALL64pcrel32;
3132     } else {
3133       SPReg = X86::ESP;
3134       PReg  = X86::EBP;
3135       LEAop = X86::LEA32r;
3136       CMPop = X86::CMP32rm;
3137       CALLop = X86::CALLpcrel32;
3138     }
3139 
3140     ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
3141     assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
3142            "HiPE prologue scratch register is live-in");
3143 
3144     // Create new MBB for StackCheck:
3145     addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
3146                  SPReg, false, -MaxStack);
3147     // SPLimitOffset is in a fixed heap location (pointed by BP).
3148     addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
3149                  .addReg(ScratchReg), PReg, false, SPLimitOffset);
3150     BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_AE);
3151 
3152     // Create new MBB for IncStack:
3153     BuildMI(incStackMBB, DL, TII.get(CALLop)).
3154       addExternalSymbol("inc_stack_0");
3155     addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
3156                  SPReg, false, -MaxStack);
3157     addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
3158                  .addReg(ScratchReg), PReg, false, SPLimitOffset);
3159     BuildMI(incStackMBB, DL, TII.get(X86::JCC_1)).addMBB(incStackMBB).addImm(X86::COND_LE);
3160 
3161     stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
3162     stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
3163     incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
3164     incStackMBB->addSuccessor(incStackMBB, {1, 100});
3165   }
3166 #ifdef EXPENSIVE_CHECKS
3167   MF.verify();
3168 #endif
3169 }
3170 
3171 bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
3172                                            MachineBasicBlock::iterator MBBI,
3173                                            const DebugLoc &DL,
3174                                            int Offset) const {
3175   if (Offset <= 0)
3176     return false;
3177 
3178   if (Offset % SlotSize)
3179     return false;
3180 
3181   int NumPops = Offset / SlotSize;
3182   // This is only worth it if we have at most 2 pops.
3183   if (NumPops != 1 && NumPops != 2)
3184     return false;
3185 
3186   // Handle only the trivial case where the adjustment directly follows
3187   // a call. This is the most common one, anyway.
3188   if (MBBI == MBB.begin())
3189     return false;
3190   MachineBasicBlock::iterator Prev = std::prev(MBBI);
3191   if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
3192     return false;
3193 
3194   unsigned Regs[2];
3195   unsigned FoundRegs = 0;
3196 
3197   const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3198   const MachineOperand &RegMask = Prev->getOperand(1);
3199 
3200   auto &RegClass =
3201       Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
3202   // Try to find up to NumPops free registers.
3203   for (auto Candidate : RegClass) {
3204     // Poor man's liveness:
3205     // Since we're immediately after a call, any register that is clobbered
3206     // by the call and not defined by it can be considered dead.
3207     if (!RegMask.clobbersPhysReg(Candidate))
3208       continue;
3209 
3210     // Don't clobber reserved registers
3211     if (MRI.isReserved(Candidate))
3212       continue;
3213 
3214     bool IsDef = false;
3215     for (const MachineOperand &MO : Prev->implicit_operands()) {
3216       if (MO.isReg() && MO.isDef() &&
3217           TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
3218         IsDef = true;
3219         break;
3220       }
3221     }
3222 
3223     if (IsDef)
3224       continue;
3225 
3226     Regs[FoundRegs++] = Candidate;
3227     if (FoundRegs == (unsigned)NumPops)
3228       break;
3229   }
3230 
3231   if (FoundRegs == 0)
3232     return false;
3233 
3234   // If we found only one free register, but need two, reuse the same one twice.
3235   while (FoundRegs < (unsigned)NumPops)
3236     Regs[FoundRegs++] = Regs[0];
3237 
3238   for (int i = 0; i < NumPops; ++i)
3239     BuildMI(MBB, MBBI, DL,
3240             TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);
3241 
3242   return true;
3243 }
3244 
3245 MachineBasicBlock::iterator X86FrameLowering::
3246 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
3247                               MachineBasicBlock::iterator I) const {
3248   bool reserveCallFrame = hasReservedCallFrame(MF);
3249   unsigned Opcode = I->getOpcode();
3250   bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
3251   DebugLoc DL = I->getDebugLoc(); // copy DebugLoc as I will be erased.
3252   uint64_t Amount = TII.getFrameSize(*I);
3253   uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0;
3254   I = MBB.erase(I);
3255   auto InsertPos = skipDebugInstructionsForward(I, MBB.end());
3256 
3257   // Try to avoid emitting dead SP adjustments if the block end is unreachable,
3258   // typically because the function is marked noreturn (abort, throw,
3259   // assert_fail, etc).
3260   if (isDestroy && blockEndIsUnreachable(MBB, I))
3261     return I;
3262 
3263   if (!reserveCallFrame) {
3264     // If the stack pointer can be changed after prologue, turn the
3265     // adjcallstackup instruction into a 'sub ESP, <amt>' and the
3266     // adjcallstackdown instruction into 'add ESP, <amt>'
3267 
3268     // We need to keep the stack aligned properly.  To do this, we round the
3269     // amount of space needed for the outgoing arguments up to the next
3270     // alignment boundary.
3271     Amount = alignTo(Amount, getStackAlign());
3272 
3273     const Function &F = MF.getFunction();
3274     bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
3275     bool DwarfCFI = !WindowsCFI && MF.needsFrameMoves();
3276 
3277     // If we have any exception handlers in this function, and we adjust
3278     // the SP before calls, we may need to indicate this to the unwinder
3279     // using GNU_ARGS_SIZE. Note that this may be necessary even when
3280     // Amount == 0, because the preceding function may have set a non-0
3281     // GNU_ARGS_SIZE.
3282     // TODO: We don't need to reset this between subsequent functions,
3283     // if it didn't change.
3284     bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();
3285 
3286     if (HasDwarfEHHandlers && !isDestroy &&
3287         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
3288       BuildCFI(MBB, InsertPos, DL,
3289                MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
3290 
3291     if (Amount == 0)
3292       return I;
3293 
3294     // Factor out the amount that gets handled inside the sequence
3295     // (Pushes of argument for frame setup, callee pops for frame destroy)
3296     Amount -= InternalAmt;
3297 
3298     // TODO: This is needed only if we require precise CFA.
3299     // If this is a callee-pop calling convention, emit a CFA adjust for
3300     // the amount the callee popped.
3301     if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
3302       BuildCFI(MBB, InsertPos, DL,
3303                MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
3304 
3305     // Add Amount to SP to destroy a frame, or subtract to setup.
3306     int64_t StackAdjustment = isDestroy ? Amount : -Amount;
3307 
3308     if (StackAdjustment) {
3309       // Merge with any previous or following adjustment instruction. Note: the
3310       // instructions merged with here do not have CFI, so their stack
3311       // adjustments do not feed into CfaAdjustment.
3312       StackAdjustment += mergeSPUpdates(MBB, InsertPos, true);
3313       StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
3314 
3315       if (StackAdjustment) {
3316         if (!(F.hasMinSize() &&
3317               adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
3318           BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
3319                                /*InEpilogue=*/false);
3320       }
3321     }
3322 
3323     if (DwarfCFI && !hasFP(MF)) {
3324       // If we don't have FP, but need to generate unwind information,
3325       // we need to set the correct CFA offset after the stack adjustment.
3326       // How much we adjust the CFA offset depends on whether we're emitting
3327       // CFI only for EH purposes or for debugging. EH only requires the CFA
3328       // offset to be correct at each call site, while for debugging we want
3329       // it to be more precise.
3330 
3331       int64_t CfaAdjustment = -StackAdjustment;
3332       // TODO: When not using precise CFA, we also need to adjust for the
3333       // InternalAmt here.
3334       if (CfaAdjustment) {
3335         BuildCFI(MBB, InsertPos, DL,
3336                  MCCFIInstruction::createAdjustCfaOffset(nullptr,
3337                                                          CfaAdjustment));
3338       }
3339     }
3340 
3341     return I;
3342   }
3343 
3344   if (InternalAmt) {
3345     MachineBasicBlock::iterator CI = I;
3346     MachineBasicBlock::iterator B = MBB.begin();
3347     while (CI != B && !std::prev(CI)->isCall())
3348       --CI;
3349     BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
3350   }
3351 
3352   return I;
3353 }
3354 
3355 bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
3356   assert(MBB.getParent() && "Block is not attached to a function!");
3357   const MachineFunction &MF = *MBB.getParent();
3358   if (!MBB.isLiveIn(X86::EFLAGS))
3359     return true;
3360 
3361   const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
3362   return !TRI->hasStackRealignment(MF) && !X86FI->hasSwiftAsyncContext();
3363 }
3364 
3365 bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
3366   assert(MBB.getParent() && "Block is not attached to a function!");
3367 
3368   // Win64 has strict requirements in terms of epilogue and we are
3369   // not taking a chance at messing with them.
3370   // I.e., unless this block is already an exit block, we can't use
3371   // it as an epilogue.
3372   if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())
3373     return false;
3374 
3375   // Swift async context epilogue has a BTR instruction that clobbers parts of
3376   // EFLAGS.
3377   const MachineFunction &MF = *MBB.getParent();
3378   if (MF.getInfo<X86MachineFunctionInfo>()->hasSwiftAsyncContext())
3379     return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
3380 
3381   if (canUseLEAForSPInEpilogue(*MBB.getParent()))
3382     return true;
3383 
3384   // If we cannot use LEA to adjust SP, we may need to use ADD, which
3385   // clobbers the EFLAGS. Check that we do not need to preserve it,
3386   // otherwise, conservatively assume this is not
3387   // safe to insert the epilogue here.
3388   return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
3389 }
3390 
3391 bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
3392   // If we may need to emit frameless compact unwind information, give
3393   // up as this is currently broken: PR25614.
3394   bool CompactUnwind =
3395       MF.getMMI().getContext().getObjectFileInfo()->getCompactUnwindSection() !=
3396       nullptr;
3397   return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF) ||
3398           !CompactUnwind) &&
3399          // The lowering of segmented stack and HiPE only support entry
3400          // blocks as prologue blocks: PR26107. This limitation may be
3401          // lifted if we fix:
3402          // - adjustForSegmentedStacks
3403          // - adjustForHiPEPrologue
3404          MF.getFunction().getCallingConv() != CallingConv::HiPE &&
3405          !MF.shouldSplitStack();
3406 }
3407 
3408 MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
3409     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
3410     const DebugLoc &DL, bool RestoreSP) const {
3411   assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
3412   assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
3413   assert(STI.is32Bit() && !Uses64BitFramePtr &&
3414          "restoring EBP/ESI on non-32-bit target");
3415 
3416   MachineFunction &MF = *MBB.getParent();
3417   Register FramePtr = TRI->getFrameRegister(MF);
3418   Register BasePtr = TRI->getBaseRegister();
3419   WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
3420   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
3421   MachineFrameInfo &MFI = MF.getFrameInfo();
3422 
3423   // FIXME: Don't set FrameSetup flag in catchret case.
3424 
3425   int FI = FuncInfo.EHRegNodeFrameIndex;
3426   int EHRegSize = MFI.getObjectSize(FI);
3427 
3428   if (RestoreSP) {
3429     // MOV32rm -EHRegSize(%ebp), %esp
3430     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
3431                  X86::EBP, true, -EHRegSize)
3432         .setMIFlag(MachineInstr::FrameSetup);
3433   }
3434 
3435   Register UsedReg;
3436   int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg).getFixed();
3437   int EndOffset = -EHRegOffset - EHRegSize;
3438   FuncInfo.EHRegNodeEndOffset = EndOffset;
3439 
3440   if (UsedReg == FramePtr) {
3441     // ADD $offset, %ebp
3442     unsigned ADDri = getADDriOpcode(false, EndOffset);
3443     BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
3444         .addReg(FramePtr)
3445         .addImm(EndOffset)
3446         .setMIFlag(MachineInstr::FrameSetup)
3447         ->getOperand(3)
3448         .setIsDead();
3449     assert(EndOffset >= 0 &&
3450            "end of registration object above normal EBP position!");
3451   } else if (UsedReg == BasePtr) {
3452     // LEA offset(%ebp), %esi
3453     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
3454                  FramePtr, false, EndOffset)
3455         .setMIFlag(MachineInstr::FrameSetup);
3456     // MOV32rm SavedEBPOffset(%esi), %ebp
3457     assert(X86FI->getHasSEHFramePtrSave());
3458     int Offset =
3459         getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg)
3460             .getFixed();
3461     assert(UsedReg == BasePtr);
3462     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
3463                  UsedReg, true, Offset)
3464         .setMIFlag(MachineInstr::FrameSetup);
3465   } else {
3466     llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
3467   }
3468   return MBBI;
3469 }
3470 
3471 int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
3472   return TRI->getSlotSize();
3473 }
3474 
3475 Register
3476 X86FrameLowering::getInitialCFARegister(const MachineFunction &MF) const {
3477   return TRI->getDwarfRegNum(StackPtr, true);
3478 }
3479 
3480 namespace {
3481 // Struct used by orderFrameObjects to help sort the stack objects.
3482 struct X86FrameSortingObject {
3483   bool IsValid = false;         // true if we care about this Object.
3484   unsigned ObjectIndex = 0;     // Index of Object into MFI list.
3485   unsigned ObjectSize = 0;      // Size of Object in bytes.
3486   Align ObjectAlignment = Align(1); // Alignment of Object in bytes.
3487   unsigned ObjectNumUses = 0;   // Object static number of uses.
3488 };
3489 
3490 // The comparison function we use for std::sort to order our local
3491 // stack symbols. The current algorithm is to use an estimated
3492 // "density". This takes into consideration the size and number of
3493 // uses each object has in order to roughly minimize code size.
3494 // So, for example, an object of size 16B that is referenced 5 times
3495 // will get higher priority than 4 4B objects referenced 1 time each.
3496 // It's not perfect and we may be able to squeeze a few more bytes out of
3497 // it (for example : 0(esp) requires fewer bytes, symbols allocated at the
3498 // fringe end can have special consideration, given their size is less
3499 // important, etc.), but the algorithmic complexity grows too much to be
3500 // worth the extra gains we get. This gets us pretty close.
3501 // The final order leaves us with objects with highest priority going
3502 // at the end of our list.
3503 struct X86FrameSortingComparator {
3504   inline bool operator()(const X86FrameSortingObject &A,
3505                          const X86FrameSortingObject &B) const {
3506     uint64_t DensityAScaled, DensityBScaled;
3507 
3508     // For consistency in our comparison, all invalid objects are placed
3509     // at the end. This also allows us to stop walking when we hit the
3510     // first invalid item after it's all sorted.
3511     if (!A.IsValid)
3512       return false;
3513     if (!B.IsValid)
3514       return true;
3515 
3516     // The density is calculated by doing :
3517     //     (double)DensityA = A.ObjectNumUses / A.ObjectSize
3518     //     (double)DensityB = B.ObjectNumUses / B.ObjectSize
3519     // Since this approach may cause inconsistencies in
3520     // the floating point <, >, == comparisons, depending on the floating
3521     // point model with which the compiler was built, we're going
3522     // to scale both sides by multiplying with
3523     // A.ObjectSize * B.ObjectSize. This ends up factoring away
3524     // the division and, with it, the need for any floating point
3525     // arithmetic.
3526     DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
3527       static_cast<uint64_t>(B.ObjectSize);
3528     DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
3529       static_cast<uint64_t>(A.ObjectSize);
3530 
3531     // If the two densities are equal, prioritize highest alignment
3532     // objects. This allows for similar alignment objects
3533     // to be packed together (given the same density).
3534     // There's room for improvement here, also, since we can pack
3535     // similar alignment (different density) objects next to each
3536     // other to save padding. This will also require further
3537     // complexity/iterations, and the overall gain isn't worth it,
3538     // in general. Something to keep in mind, though.
3539     if (DensityAScaled == DensityBScaled)
3540       return A.ObjectAlignment < B.ObjectAlignment;
3541 
3542     return DensityAScaled < DensityBScaled;
3543   }
3544 };
3545 } // namespace
3546 
3547 // Order the symbols in the local stack.
3548 // We want to place the local stack objects in some sort of sensible order.
3549 // The heuristic we use is to try and pack them according to static number
3550 // of uses and size of object in order to minimize code size.
3551 void X86FrameLowering::orderFrameObjects(
3552     const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
3553   const MachineFrameInfo &MFI = MF.getFrameInfo();
3554 
3555   // Don't waste time if there's nothing to do.
3556   if (ObjectsToAllocate.empty())
3557     return;
3558 
3559   // Create an array of all MFI objects. We won't need all of these
3560   // objects, but we're going to create a full array of them to make
3561   // it easier to index into when we're counting "uses" down below.
3562   // We want to be able to easily/cheaply access an object by simply
3563   // indexing into it, instead of having to search for it every time.
3564   std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());
3565 
3566   // Walk the objects we care about and mark them as such in our working
3567   // struct.
3568   for (auto &Obj : ObjectsToAllocate) {
3569     SortingObjects[Obj].IsValid = true;
3570     SortingObjects[Obj].ObjectIndex = Obj;
3571     SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj);
3572     // Set the size.
3573     int ObjectSize = MFI.getObjectSize(Obj);
3574     if (ObjectSize == 0)
3575       // Variable size. Just use 4.
3576       SortingObjects[Obj].ObjectSize = 4;
3577     else
3578       SortingObjects[Obj].ObjectSize = ObjectSize;
3579   }
3580 
3581   // Count the number of uses for each object.
3582   for (auto &MBB : MF) {
3583     for (auto &MI : MBB) {
3584       if (MI.isDebugInstr())
3585         continue;
3586       for (const MachineOperand &MO : MI.operands()) {
3587         // Check to see if it's a local stack symbol.
3588         if (!MO.isFI())
3589           continue;
3590         int Index = MO.getIndex();
3591         // Check to see if it falls within our range, and is tagged
3592         // to require ordering.
3593         if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
3594             SortingObjects[Index].IsValid)
3595           SortingObjects[Index].ObjectNumUses++;
3596       }
3597     }
3598   }
3599 
3600   // Sort the objects using X86FrameSortingAlgorithm (see its comment for
3601   // info).
3602   llvm::stable_sort(SortingObjects, X86FrameSortingComparator());
3603 
3604   // Now modify the original list to represent the final order that
3605   // we want. The order will depend on whether we're going to access them
3606   // from the stack pointer or the frame pointer. For SP, the list should
3607   // end up with the END containing objects that we want with smaller offsets.
3608   // For FP, it should be flipped.
3609   int i = 0;
3610   for (auto &Obj : SortingObjects) {
3611     // All invalid items are sorted at the end, so it's safe to stop.
3612     if (!Obj.IsValid)
3613       break;
3614     ObjectsToAllocate[i++] = Obj.ObjectIndex;
3615   }
3616 
3617   // Flip it if we're accessing off of the FP.
3618   if (!TRI->hasStackRealignment(MF) && hasFP(MF))
3619     std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
3620 }
3621 
3622 
3623 unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
3624   // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
3625   unsigned Offset = 16;
3626   // RBP is immediately pushed.
3627   Offset += SlotSize;
3628   // All callee-saved registers are then pushed.
3629   Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
3630   // Every funclet allocates enough stack space for the largest outgoing call.
3631   Offset += getWinEHFuncletFrameSize(MF);
3632   return Offset;
3633 }
3634 
3635 void X86FrameLowering::processFunctionBeforeFrameFinalized(
3636     MachineFunction &MF, RegScavenger *RS) const {
3637   // Mark the function as not having WinCFI. We will set it back to true in
3638   // emitPrologue if it gets called and emits CFI.
3639   MF.setHasWinCFI(false);
3640 
3641   // If we are using Windows x64 CFI, ensure that the stack is always 8 byte
3642   // aligned. The format doesn't support misaligned stack adjustments.
3643   if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI())
3644     MF.getFrameInfo().ensureMaxAlignment(Align(SlotSize));
3645 
3646   // If this function isn't doing Win64-style C++ EH, we don't need to do
3647   // anything.
3648   if (STI.is64Bit() && MF.hasEHFunclets() &&
3649       classifyEHPersonality(MF.getFunction().getPersonalityFn()) ==
3650           EHPersonality::MSVC_CXX) {
3651     adjustFrameForMsvcCxxEh(MF);
3652   }
3653 }
3654 
3655 void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const {
3656   // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
3657   // relative to RSP after the prologue.  Find the offset of the last fixed
3658   // object, so that we can allocate a slot immediately following it. If there
3659   // were no fixed objects, use offset -SlotSize, which is immediately after the
3660   // return address. Fixed objects have negative frame indices.
3661   MachineFrameInfo &MFI = MF.getFrameInfo();
3662   WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
3663   int64_t MinFixedObjOffset = -SlotSize;
3664   for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
3665     MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));
3666 
3667   for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
3668     for (WinEHHandlerType &H : TBME.HandlerArray) {
3669       int FrameIndex = H.CatchObj.FrameIndex;
3670       if (FrameIndex != INT_MAX) {
3671         // Ensure alignment.
3672         unsigned Align = MFI.getObjectAlign(FrameIndex).value();
3673         MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
3674         MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
3675         MFI.setObjectOffset(FrameIndex, MinFixedObjOffset);
3676       }
3677     }
3678   }
3679 
3680   // Ensure alignment.
3681   MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
3682   int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
3683   int UnwindHelpFI =
3684       MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*IsImmutable=*/false);
3685   EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
3686 
3687   // Store -2 into UnwindHelp on function entry. We have to scan forwards past
3688   // other frame setup instructions.
3689   MachineBasicBlock &MBB = MF.front();
3690   auto MBBI = MBB.begin();
3691   while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
3692     ++MBBI;
3693 
3694   DebugLoc DL = MBB.findDebugLoc(MBBI);
3695   addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),
3696                     UnwindHelpFI)
3697       .addImm(-2);
3698 }
3699 
3700 void X86FrameLowering::processFunctionBeforeFrameIndicesReplaced(
3701     MachineFunction &MF, RegScavenger *RS) const {
3702   if (STI.is32Bit() && MF.hasEHFunclets())
3703     restoreWinEHStackPointersInParent(MF);
3704 }
3705 
3706 void X86FrameLowering::restoreWinEHStackPointersInParent(
3707     MachineFunction &MF) const {
3708   // 32-bit functions have to restore stack pointers when control is transferred
3709   // back to the parent function. These blocks are identified as eh pads that
3710   // are not funclet entries.
3711   bool IsSEH = isAsynchronousEHPersonality(
3712       classifyEHPersonality(MF.getFunction().getPersonalityFn()));
3713   for (MachineBasicBlock &MBB : MF) {
3714     bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry();
3715     if (NeedsRestore)
3716       restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(),
3717                                   /*RestoreSP=*/IsSEH);
3718   }
3719 }
3720