1 //===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the X86 implementation of TargetFrameLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "X86FrameLowering.h"
15 #include "X86InstrBuilder.h"
16 #include "X86InstrInfo.h"
17 #include "X86MachineFunctionInfo.h"
18 #include "X86Subtarget.h"
19 #include "X86TargetMachine.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/Analysis/EHPersonalities.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineModuleInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/WinEHFuncInfo.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/MC/MCAsmInfo.h"
31 #include "llvm/MC/MCSymbol.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include <cstdlib>
35 
36 using namespace llvm;
37 
38 X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
39                                    unsigned StackAlignOverride)
40     : TargetFrameLowering(StackGrowsDown, StackAlignOverride,
41                           STI.is64Bit() ? -8 : -4),
42       STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
43   // Cache a bunch of frame-related predicates for this subtarget.
44   SlotSize = TRI->getSlotSize();
45   Is64Bit = STI.is64Bit();
46   IsLP64 = STI.isTarget64BitLP64();
47   // Standard x86-64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
48   Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
49   StackPtr = TRI->getStackRegister();
50 }
51 
52 bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
53   return !MF.getFrameInfo().hasVarSizedObjects() &&
54          !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
55 }
56 
57 /// canSimplifyCallFramePseudos - If there is a reserved call frame, the
58 /// call frame pseudos can be simplified.  Having a FP, as in the default
59 /// implementation, is not sufficient here since we can't always use it.
60 /// Use a more nuanced condition.
61 bool
62 X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
63   return hasReservedCallFrame(MF) ||
64          (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
65          TRI->hasBasePointer(MF);
66 }
67 
68 // needsFrameIndexResolution - Do we need to perform FI resolution for
69 // this function? Normally this is required only when the function
70 // has stack objects. However, FI resolution has another job, not
71 // apparent from the name: it also lowers the call frame setup/destroy
72 // pseudos that were not simplified earlier.
73 // So it is also required for x86 functions that have push sequences, even
74 // when there are no stack objects.
75 bool
76 X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
77   return MF.getFrameInfo().hasStackObjects() ||
78          MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
79 }
80 
81 /// hasFP - Return true if the specified function should have a dedicated frame
82 /// pointer register.  This is true if the function has variable sized allocas
83 /// or if frame pointer elimination is disabled.
84 bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
85   const MachineFrameInfo &MFI = MF.getFrameInfo();
86   return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
87           TRI->needsStackRealignment(MF) ||
88           MFI.hasVarSizedObjects() ||
89           MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() ||
90           MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
91           MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
92           MFI.hasStackMap() || MFI.hasPatchPoint() ||
93           MFI.hasCopyImplyingStackAdjustment());
94 }
95 
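// The following helpers pick the smallest immediate encoding for the given
// pointer width. For example, getSUBriOpcode(/*IsLP64=*/true, 8) yields
// X86::SUB64ri8, while an immediate of 4096 yields X86::SUB64ri32; the
// ADD and AND helpers below follow the same pattern.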
96 static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
97   if (IsLP64) {
98     if (isInt<8>(Imm))
99       return X86::SUB64ri8;
100     return X86::SUB64ri32;
101   } else {
102     if (isInt<8>(Imm))
103       return X86::SUB32ri8;
104     return X86::SUB32ri;
105   }
106 }
107 
108 static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
109   if (IsLP64) {
110     if (isInt<8>(Imm))
111       return X86::ADD64ri8;
112     return X86::ADD64ri32;
113   } else {
114     if (isInt<8>(Imm))
115       return X86::ADD32ri8;
116     return X86::ADD32ri;
117   }
118 }
119 
120 static unsigned getSUBrrOpcode(unsigned isLP64) {
121   return isLP64 ? X86::SUB64rr : X86::SUB32rr;
122 }
123 
124 static unsigned getADDrrOpcode(unsigned isLP64) {
125   return isLP64 ? X86::ADD64rr : X86::ADD32rr;
126 }
127 
128 static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
129   if (IsLP64) {
130     if (isInt<8>(Imm))
131       return X86::AND64ri8;
132     return X86::AND64ri32;
133   }
134   if (isInt<8>(Imm))
135     return X86::AND32ri8;
136   return X86::AND32ri;
137 }
138 
139 static unsigned getLEArOpcode(unsigned IsLP64) {
140   return IsLP64 ? X86::LEA64r : X86::LEA32r;
141 }
142 
143 /// findDeadCallerSavedReg - Return a caller-saved register that isn't live
144 /// when it reaches the "return" instruction. We can then pop a stack object
145 /// to this register without worrying about clobbering it.
146 static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
147                                        MachineBasicBlock::iterator &MBBI,
148                                        const X86RegisterInfo *TRI,
149                                        bool Is64Bit) {
150   const MachineFunction *MF = MBB.getParent();
151   if (MF->callsEHReturn())
152     return 0;
153 
154   const TargetRegisterClass &AvailableRegs = *TRI->getGPRsForTailCall(*MF);
155 
156   if (MBBI == MBB.end())
157     return 0;
158 
159   switch (MBBI->getOpcode()) {
160   default: return 0;
161   case TargetOpcode::PATCHABLE_RET:
162   case X86::RET:
163   case X86::RETL:
164   case X86::RETQ:
165   case X86::RETIL:
166   case X86::RETIQ:
167   case X86::TCRETURNdi:
168   case X86::TCRETURNri:
169   case X86::TCRETURNmi:
170   case X86::TCRETURNdi64:
171   case X86::TCRETURNri64:
172   case X86::TCRETURNmi64:
173   case X86::EH_RETURN:
174   case X86::EH_RETURN64: {
175     SmallSet<uint16_t, 8> Uses;
176     for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
177       MachineOperand &MO = MBBI->getOperand(i);
178       if (!MO.isReg() || MO.isDef())
179         continue;
180       unsigned Reg = MO.getReg();
181       if (!Reg)
182         continue;
183       for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
184         Uses.insert(*AI);
185     }
186 
187     for (auto CS : AvailableRegs)
188       if (!Uses.count(CS) && CS != X86::RIP)
189         return CS;
190   }
191   }
192 
193   return 0;
194 }
195 
196 static bool isEAXLiveIn(MachineBasicBlock &MBB) {
197   for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
198     unsigned Reg = RegMask.PhysReg;
199 
200     if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
201         Reg == X86::AH || Reg == X86::AL)
202       return true;
203   }
204 
205   return false;
206 }
207 
208 /// Check if the flags need to be preserved before the terminators.
209 /// This is the case if EFLAGS is live-in to the region composed of the
210 /// terminators, or live-out of that region without being defined by a
211 /// terminator.
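/// For example, a block ending in a conditional branch (JCC) has a terminator
/// that reads EFLAGS without defining it, so any stack adjustment inserted
/// before the terminators must not clobber the flags.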
212 static bool
213 flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
214   for (const MachineInstr &MI : MBB.terminators()) {
215     bool BreakNext = false;
216     for (const MachineOperand &MO : MI.operands()) {
217       if (!MO.isReg())
218         continue;
219       unsigned Reg = MO.getReg();
220       if (Reg != X86::EFLAGS)
221         continue;
222 
223       // This terminator needs an EFLAGS value that is not defined
224       // by a previous terminator:
225       // EFLAGS is live-in of the region composed by the terminators.
226       if (!MO.isDef())
227         return true;
228       // This terminator defines the eflags, i.e., we don't need to preserve it.
229       // However, we still need to check that this specific terminator does
230       // not read a live-in value.
231       BreakNext = true;
232     }
233     // We found a definition of the eflags, no need to preserve them.
234     if (BreakNext)
235       return false;
236   }
237 
238   // None of the terminators use or define the eflags.
239   // Check if they are live-out, that would imply we need to preserve them.
240   for (const MachineBasicBlock *Succ : MBB.successors())
241     if (Succ->isLiveIn(X86::EFLAGS))
242       return true;
243 
244   return false;
245 }
246 
247 /// emitSPUpdate - Emit a series of instructions to increment / decrement the
248 /// stack pointer by a constant value.
249 void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
250                                     MachineBasicBlock::iterator &MBBI,
251                                     const DebugLoc &DL,
252                                     int64_t NumBytes, bool InEpilogue) const {
253   bool isSub = NumBytes < 0;
254   uint64_t Offset = isSub ? -NumBytes : NumBytes;
255   MachineInstr::MIFlag Flag =
256       isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy;
257 
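  // Chunk is the largest 32-bit signed immediate (0x7FFFFFFF); anything larger
  // cannot be encoded in a single ADD/SUB and is handled specially below.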
258   uint64_t Chunk = (1LL << 31) - 1;
259 
260   if (Offset > Chunk) {
261     // Rather than emit a long series of instructions for large offsets,
262     // load the offset into a register and do one sub/add
263     unsigned Reg = 0;
264     unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
265 
266     if (isSub && !isEAXLiveIn(MBB))
267       Reg = Rax;
268     else
269       Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
270 
271     unsigned MovRIOpc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
272     unsigned AddSubRROpc =
273         isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
274     if (Reg) {
275       BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Reg)
276           .addImm(Offset)
277           .setMIFlag(Flag);
278       MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
279                              .addReg(StackPtr)
280                              .addReg(Reg);
281       MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
282       return;
283     } else if (Offset > 8 * Chunk) {
284       // If we would need more than 8 add or sub instructions (a >16GB stack
285       // frame), it's worth spilling RAX to materialize this immediate.
286       //   pushq %rax
287       //   movabsq +-$Offset+-SlotSize, %rax
288       //   addq %rsp, %rax
289       //   xchg %rax, (%rsp)
290       //   movq (%rsp), %rsp
291       assert(Is64Bit && "can't have 32-bit 16GB stack frame");
292       BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
293           .addReg(Rax, RegState::Kill)
294           .setMIFlag(Flag);
295       // Subtract is not commutative, so negate the offset and always use add.
296       // Subtract 8 less and add 8 more to account for the PUSH we just did.
297       if (isSub)
298         Offset = -(Offset - SlotSize);
299       else
300         Offset = Offset + SlotSize;
301       BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Rax)
302           .addImm(Offset)
303           .setMIFlag(Flag);
304       MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
305                              .addReg(Rax)
306                              .addReg(StackPtr);
307       MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
308       // Exchange the new SP in RAX with the top of the stack.
309       addRegOffset(
310           BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax),
311           StackPtr, false, 0);
312       // Load new SP from the top of the stack into RSP.
313       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr),
314                    StackPtr, false, 0);
315       return;
316     }
317   }
318 
319   while (Offset) {
320     uint64_t ThisVal = std::min(Offset, Chunk);
321     if (ThisVal == SlotSize) {
322       // Use push / pop for slot sized adjustments as a size optimization. We
323       // need to find a dead register when using pop.
324       unsigned Reg = isSub
325         ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
326         : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
327       if (Reg) {
328         unsigned Opc = isSub
329           ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
330           : (Is64Bit ? X86::POP64r  : X86::POP32r);
331         BuildMI(MBB, MBBI, DL, TII.get(Opc))
332             .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
333             .setMIFlag(Flag);
334         Offset -= ThisVal;
335         continue;
336       }
337     }
338 
339     BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
340         .setMIFlag(Flag);
341 
342     Offset -= ThisVal;
343   }
344 }
345 
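// Emit a single instruction that adjusts the stack pointer by Offset bytes,
// using either LEA (which leaves EFLAGS untouched) or ADD/SUB. For example,
// an Offset of -16 becomes roughly "lea -16(%rsp), %rsp" or "sub $16, %rsp",
// depending on the UseLEA decision below.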
346 MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
347     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
348     const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
349   assert(Offset != 0 && "zero offset stack adjustment requested");
350 
351   // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
352   // is tricky.
353   bool UseLEA;
354   if (!InEpilogue) {
355     // Check if inserting the prologue at the beginning
356     // of MBB would require to use LEA operations.
357     // We need to use LEA operations if EFLAGS is live in, because
358     // it means an instruction will read it before it gets defined.
359     UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
360   } else {
361     // If we can use LEA for SP but we shouldn't, check that none
362     // of the terminators uses the eflags. Otherwise we will insert
363     // an ADD that will redefine the eflags and break the condition.
364     // Alternatively, we could move the ADD, but this may not be possible
365     // and is an optimization anyway.
366     UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
367     if (UseLEA && !STI.useLeaForSP())
368       UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB);
369     // If the assert below fires, it means we did not do the right thing
370     // in canUseAsEpilogue.
371     assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) &&
372            "We shouldn't have allowed this insertion point");
373   }
374 
375   MachineInstrBuilder MI;
376   if (UseLEA) {
377     MI = addRegOffset(BuildMI(MBB, MBBI, DL,
378                               TII.get(getLEArOpcode(Uses64BitFramePtr)),
379                               StackPtr),
380                       StackPtr, false, Offset);
381   } else {
382     bool IsSub = Offset < 0;
383     uint64_t AbsOffset = IsSub ? -Offset : Offset;
384     unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
385                          : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
386     MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
387              .addReg(StackPtr)
388              .addImm(AbsOffset);
389     MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
390   }
391   return MI;
392 }
393 
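// Fold an adjacent SP add/sub/LEA into the adjustment about to be emitted.
// For example, when merging with the previous instruction, a preceding
// "sub $16, %rsp" is erased and -16 is returned so the caller can fold it
// into its own stack allocation.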
394 int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
395                                      MachineBasicBlock::iterator &MBBI,
396                                      bool doMergeWithPrevious) const {
397   if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
398       (!doMergeWithPrevious && MBBI == MBB.end()))
399     return 0;
400 
401   MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
402   MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
403                                                        : std::next(MBBI);
404   PI = skipDebugInstructionsBackward(PI, MBB.begin());
405   if (NI != nullptr)
406     NI = skipDebugInstructionsForward(NI, MBB.end());
407 
408   unsigned Opc = PI->getOpcode();
409   int Offset = 0;
410 
411   if (!doMergeWithPrevious && NI != MBB.end() &&
412       NI->getOpcode() == TargetOpcode::CFI_INSTRUCTION) {
413     // Don't merge with the next instruction if it has CFI.
414     return Offset;
415   }
416 
417   if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
418        Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
419       PI->getOperand(0).getReg() == StackPtr){
420     assert(PI->getOperand(1).getReg() == StackPtr);
421     Offset += PI->getOperand(2).getImm();
422     MBB.erase(PI);
423     if (!doMergeWithPrevious) MBBI = NI;
424   } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
425              PI->getOperand(0).getReg() == StackPtr &&
426              PI->getOperand(1).getReg() == StackPtr &&
427              PI->getOperand(2).getImm() == 1 &&
428              PI->getOperand(3).getReg() == X86::NoRegister &&
429              PI->getOperand(5).getReg() == X86::NoRegister) {
430     // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
431     Offset += PI->getOperand(4).getImm();
432     MBB.erase(PI);
433     if (!doMergeWithPrevious) MBBI = NI;
434   } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
435               Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
436              PI->getOperand(0).getReg() == StackPtr) {
437     assert(PI->getOperand(1).getReg() == StackPtr);
438     Offset -= PI->getOperand(2).getImm();
439     MBB.erase(PI);
440     if (!doMergeWithPrevious) MBBI = NI;
441   }
442 
443   return Offset;
444 }
445 
446 void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
447                                 MachineBasicBlock::iterator MBBI,
448                                 const DebugLoc &DL,
449                                 const MCCFIInstruction &CFIInst) const {
450   MachineFunction &MF = *MBB.getParent();
451   unsigned CFIIndex = MF.addFrameInst(CFIInst);
452   BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
453       .addCFIIndex(CFIIndex);
454 }
455 
456 void X86FrameLowering::emitCalleeSavedFrameMoves(
457     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
458     const DebugLoc &DL) const {
459   MachineFunction &MF = *MBB.getParent();
460   MachineFrameInfo &MFI = MF.getFrameInfo();
461   MachineModuleInfo &MMI = MF.getMMI();
462   const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
463 
464   // Add callee saved registers to move list.
465   const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
466   if (CSI.empty()) return;
467 
468   // Calculate offsets.
469   for (std::vector<CalleeSavedInfo>::const_iterator
470          I = CSI.begin(), E = CSI.end(); I != E; ++I) {
471     int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
472     unsigned Reg = I->getReg();
473 
474     unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
475     BuildCFI(MBB, MBBI, DL,
476              MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
477   }
478 }
479 
480 void X86FrameLowering::emitStackProbe(MachineFunction &MF,
481                                       MachineBasicBlock &MBB,
482                                       MachineBasicBlock::iterator MBBI,
483                                       const DebugLoc &DL, bool InProlog) const {
484   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
485   if (STI.isTargetWindowsCoreCLR()) {
486     if (InProlog) {
487       emitStackProbeInlineStub(MF, MBB, MBBI, DL, true);
488     } else {
489       emitStackProbeInline(MF, MBB, MBBI, DL, false);
490     }
491   } else {
492     emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
493   }
494 }
495 
496 void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
497                                         MachineBasicBlock &PrologMBB) const {
498   const StringRef ChkStkStubSymbol = "__chkstk_stub";
499   MachineInstr *ChkStkStub = nullptr;
500 
501   for (MachineInstr &MI : PrologMBB) {
502     if (MI.isCall() && MI.getOperand(0).isSymbol() &&
503         ChkStkStubSymbol == MI.getOperand(0).getSymbolName()) {
504       ChkStkStub = &MI;
505       break;
506     }
507   }
508 
509   if (ChkStkStub != nullptr) {
510     assert(!ChkStkStub->isBundled() &&
511            "Not expecting bundled instructions here");
512     MachineBasicBlock::iterator MBBI = std::next(ChkStkStub->getIterator());
513     assert(std::prev(MBBI) == ChkStkStub &&
514            "MBBI expected after __chkstk_stub.");
515     DebugLoc DL = PrologMBB.findDebugLoc(MBBI);
516     emitStackProbeInline(MF, PrologMBB, MBBI, DL, true);
517     ChkStkStub->eraseFromParent();
518   }
519 }
520 
521 void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
522                                             MachineBasicBlock &MBB,
523                                             MachineBasicBlock::iterator MBBI,
524                                             const DebugLoc &DL,
525                                             bool InProlog) const {
526   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
527   assert(STI.is64Bit() && "different expansion needed for 32 bit");
528   assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
529   const TargetInstrInfo &TII = *STI.getInstrInfo();
530   const BasicBlock *LLVM_BB = MBB.getBasicBlock();
531 
532   // RAX contains the number of bytes of desired stack adjustment.
533   // The handling here assumes this value has already been updated so as to
534   // maintain stack alignment.
535   //
536   // We need to exit with RSP modified by this amount and execute suitable
537   // page touches to notify the OS that we're growing the stack responsibly.
538   // All stack probing must be done without modifying RSP.
539   //
540   // MBB:
541   //    SizeReg = RAX;
542   //    ZeroReg = 0
543   //    CopyReg = RSP
544   //    Flags, TestReg = CopyReg - SizeReg
545   //    FinalReg = !Flags.Ovf ? TestReg : ZeroReg
546   //    LimitReg = gs magic thread env access
547   //    if FinalReg >= LimitReg goto ContinueMBB
548   // RoundBB:
549   //    RoundReg = page address of FinalReg
550   // LoopMBB:
551   //    LoopReg = PHI(LimitReg,ProbeReg)
552   //    ProbeReg = LoopReg - PageSize
553   //    [ProbeReg] = 0
554   //    if (ProbeReg > RoundReg) goto LoopMBB
555   // ContinueMBB:
556   //    RSP = RSP - RAX
557   //    [rest of original MBB]
558 
559   // Set up the new basic blocks
560   MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB);
561   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
562   MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB);
563 
564   MachineFunction::iterator MBBIter = std::next(MBB.getIterator());
565   MF.insert(MBBIter, RoundMBB);
566   MF.insert(MBBIter, LoopMBB);
567   MF.insert(MBBIter, ContinueMBB);
568 
569   // Split MBB and move the tail portion down to ContinueMBB.
570   MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI);
571   ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end());
572   ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB);
573 
574   // Some useful constants
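  // ThreadEnvironmentStackLimit (0x10) is the offset of the StackLimit field
  // in the thread environment block, reached through the %gs segment on
  // Win64 (the "gs magic thread env access" in the sketch above).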
575   const int64_t ThreadEnvironmentStackLimit = 0x10;
576   const int64_t PageSize = 0x1000;
577   const int64_t PageMask = ~(PageSize - 1);
578 
579   // Registers we need. For the normal case we use virtual
580   // registers. For the prolog expansion we use RAX, RCX and RDX.
581   MachineRegisterInfo &MRI = MF.getRegInfo();
582   const TargetRegisterClass *RegClass = &X86::GR64RegClass;
583   const unsigned SizeReg = InProlog ? (unsigned)X86::RAX
584                                     : MRI.createVirtualRegister(RegClass),
585                  ZeroReg = InProlog ? (unsigned)X86::RCX
586                                     : MRI.createVirtualRegister(RegClass),
587                  CopyReg = InProlog ? (unsigned)X86::RDX
588                                     : MRI.createVirtualRegister(RegClass),
589                  TestReg = InProlog ? (unsigned)X86::RDX
590                                     : MRI.createVirtualRegister(RegClass),
591                  FinalReg = InProlog ? (unsigned)X86::RDX
592                                      : MRI.createVirtualRegister(RegClass),
593                  RoundedReg = InProlog ? (unsigned)X86::RDX
594                                        : MRI.createVirtualRegister(RegClass),
595                  LimitReg = InProlog ? (unsigned)X86::RCX
596                                      : MRI.createVirtualRegister(RegClass),
597                  JoinReg = InProlog ? (unsigned)X86::RCX
598                                     : MRI.createVirtualRegister(RegClass),
599                  ProbeReg = InProlog ? (unsigned)X86::RCX
600                                      : MRI.createVirtualRegister(RegClass);
601 
602   // SP-relative offsets where we can save RCX and RDX.
603   int64_t RCXShadowSlot = 0;
604   int64_t RDXShadowSlot = 0;
605 
606   // If inlining in the prolog, save RCX and RDX.
607   // Future optimization: don't save or restore if not live in.
608   if (InProlog) {
609     // Compute the offsets. We need to account for things already
610     // pushed onto the stack at this point: return address, frame
611     // pointer (if used), and callee saves.
612     X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
613     const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize();
614     const bool HasFP = hasFP(MF);
615     RCXShadowSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
616     RDXShadowSlot = RCXShadowSlot + 8;
617     // Emit the saves.
618     addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
619                  RCXShadowSlot)
620         .addReg(X86::RCX);
621     addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
622                  RDXShadowSlot)
623         .addReg(X86::RDX);
624   } else {
625     // Not in the prolog. Copy RAX to a virtual reg.
626     BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX);
627   }
628 
629   // Add code to MBB to check for overflow and set the new target stack pointer
630   // to zero if so.
631   BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg)
632       .addReg(ZeroReg, RegState::Undef)
633       .addReg(ZeroReg, RegState::Undef);
634   BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP);
635   BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
636       .addReg(CopyReg)
637       .addReg(SizeReg);
638   BuildMI(&MBB, DL, TII.get(X86::CMOVB64rr), FinalReg)
639       .addReg(TestReg)
640       .addReg(ZeroReg);
641 
642   // FinalReg now holds final stack pointer value, or zero if
643   // allocation would overflow. Compare against the current stack
644   // limit from the thread environment block. Note this limit is the
645   // lowest touched page on the stack, not the point at which the OS
646   // will cause an overflow exception, so this is just an optimization
647   // to avoid unnecessarily touching pages that are below the current
648   // SP but already committed to the stack by the OS.
649   BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
650       .addReg(0)
651       .addImm(1)
652       .addReg(0)
653       .addImm(ThreadEnvironmentStackLimit)
654       .addReg(X86::GS);
655   BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
656   // Jump if the desired stack pointer is at or above the stack limit.
657   BuildMI(&MBB, DL, TII.get(X86::JAE_1)).addMBB(ContinueMBB);
658 
659   // Add code to roundMBB to round the final stack pointer to a page boundary.
660   BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
661       .addReg(FinalReg)
662       .addImm(PageMask);
663   BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB);
664 
665   // LimitReg now holds the current stack limit, RoundedReg page-rounded
666   // final RSP value. Add code to loopMBB to decrement LimitReg page-by-page
667   // and probe until we reach RoundedReg.
668   if (!InProlog) {
669     BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
670         .addReg(LimitReg)
671         .addMBB(RoundMBB)
672         .addReg(ProbeReg)
673         .addMBB(LoopMBB);
674   }
675 
676   addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg,
677                false, -PageSize);
678 
679   // Probe by storing a byte onto the stack.
680   BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi))
681       .addReg(ProbeReg)
682       .addImm(1)
683       .addReg(0)
684       .addImm(0)
685       .addReg(0)
686       .addImm(0);
687   BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
688       .addReg(RoundedReg)
689       .addReg(ProbeReg);
690   BuildMI(LoopMBB, DL, TII.get(X86::JNE_1)).addMBB(LoopMBB);
691 
692   MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();
693 
694   // If in prolog, restore RDX and RCX.
695   if (InProlog) {
696     addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::MOV64rm),
697                          X86::RCX),
698                  X86::RSP, false, RCXShadowSlot);
699     addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::MOV64rm),
700                          X86::RDX),
701                  X86::RSP, false, RDXShadowSlot);
702   }
703 
704   // Now that the probing is done, add code to continueMBB to update
705   // the stack pointer for real.
706   BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
707       .addReg(X86::RSP)
708       .addReg(SizeReg);
709 
710   // Add the control flow edges we need.
711   MBB.addSuccessor(ContinueMBB);
712   MBB.addSuccessor(RoundMBB);
713   RoundMBB->addSuccessor(LoopMBB);
714   LoopMBB->addSuccessor(ContinueMBB);
715   LoopMBB->addSuccessor(LoopMBB);
716 
717   // Mark all the instructions added to the prolog as frame setup.
718   if (InProlog) {
719     for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
720       BeforeMBBI->setFlag(MachineInstr::FrameSetup);
721     }
722     for (MachineInstr &MI : *RoundMBB) {
723       MI.setFlag(MachineInstr::FrameSetup);
724     }
725     for (MachineInstr &MI : *LoopMBB) {
726       MI.setFlag(MachineInstr::FrameSetup);
727     }
728     for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
729          CMBBI != ContinueMBBI; ++CMBBI) {
730       CMBBI->setFlag(MachineInstr::FrameSetup);
731     }
732   }
733 
734   // Possible TODO: physreg liveness for InProlog case.
735 }
736 
737 void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
738                                           MachineBasicBlock &MBB,
739                                           MachineBasicBlock::iterator MBBI,
740                                           const DebugLoc &DL,
741                                           bool InProlog) const {
742   bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
743 
744   // FIXME: Add retpoline support and remove this.
745   if (Is64Bit && IsLargeCodeModel && STI.useRetpoline())
746     report_fatal_error("Emitting stack probe calls on 64-bit with the large "
747                        "code model and retpoline not yet implemented.");
748 
749   unsigned CallOp;
750   if (Is64Bit)
751     CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
752   else
753     CallOp = X86::CALLpcrel32;
754 
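  // The probe symbol is target-dependent: typically _chkstk/_alloca for
  // 32-bit Windows targets and __chkstk/___chkstk_ms for 64-bit ones (see
  // the platform notes below).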
755   StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);
756 
757   MachineInstrBuilder CI;
758   MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);
759 
760   // All current stack probes take AX and SP as input, clobber flags, and
761   // preserve all registers. x86_64 probes leave RSP unmodified.
762   if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
763     // For the large code model, we have to call through a register. Use R11,
764     // as it is scratch in all supported calling conventions.
765     BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
766         .addExternalSymbol(MF.createExternalSymbolName(Symbol));
767     CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
768   } else {
769     CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
770         .addExternalSymbol(MF.createExternalSymbolName(Symbol));
771   }
772 
773   unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
774   unsigned SP = Is64Bit ? X86::RSP : X86::ESP;
775   CI.addReg(AX, RegState::Implicit)
776       .addReg(SP, RegState::Implicit)
777       .addReg(AX, RegState::Define | RegState::Implicit)
778       .addReg(SP, RegState::Define | RegState::Implicit)
779       .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
780 
781   if (STI.isTargetWin64() || !STI.isOSWindows()) {
782     // MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves.
783     // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
784     // themselves. They also do not clobber %rax, so we can reuse it when
785     // adjusting %rsp.
786     // All other platforms do not specify a particular ABI for the stack probe
787     // function, so we arbitrarily define it to not adjust %esp/%rsp itself.
788     BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Is64Bit)), SP)
789         .addReg(SP)
790         .addReg(AX);
791   }
792 
793   if (InProlog) {
794     // Apply the frame setup flag to all inserted instrs.
795     for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
796       ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
797   }
798 }
799 
800 void X86FrameLowering::emitStackProbeInlineStub(
801     MachineFunction &MF, MachineBasicBlock &MBB,
802     MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
803 
804   assert(InProlog && "ChkStkStub called outside prolog!");
805 
806   BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
807       .addExternalSymbol("__chkstk_stub");
808 }
809 
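// For example, an SP adjustment of 40 yields a frame-pointer offset of 32
// (rounded down to 16-byte alignment), and anything >= 128 is clamped to 128.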
810 static unsigned calculateSetFPREG(uint64_t SPAdjust) {
811   // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
812   // and might require smaller successive adjustments.
813   const uint64_t Win64MaxSEHOffset = 128;
814   uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
815   // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
816   return SEHFrameOffset & -16;
817 }
818 
819 // If we're forcing a stack realignment we can't rely on just the frame
820 // info; we need to know the ABI stack alignment as well in case there is a
821 // call out.  Otherwise just make sure we have some alignment - we'll
822 // go with the minimum SlotSize.
823 uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
824   const MachineFrameInfo &MFI = MF.getFrameInfo();
825   uint64_t MaxAlign = MFI.getMaxAlignment(); // Desired stack alignment.
826   unsigned StackAlign = getStackAlignment();
827   if (MF.getFunction().hasFnAttribute("stackrealign")) {
828     if (MFI.hasCalls())
829       MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
830     else if (MaxAlign < SlotSize)
831       MaxAlign = SlotSize;
832   }
833   return MaxAlign;
834 }
835 
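// Emit an AND that rounds Reg down to a multiple of MaxAlign; e.g. for a
// 32-byte requirement on the 64-bit stack pointer this is "and $-32, %rsp".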
836 void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
837                                           MachineBasicBlock::iterator MBBI,
838                                           const DebugLoc &DL, unsigned Reg,
839                                           uint64_t MaxAlign) const {
840   uint64_t Val = -MaxAlign;
841   unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
842   MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
843                          .addReg(Reg)
844                          .addImm(Val)
845                          .setMIFlag(MachineInstr::FrameSetup);
846 
847   // The EFLAGS implicit def is dead.
848   MI->getOperand(3).setIsDead();
849 }
850 
851 /// emitPrologue - Push callee-saved registers onto the stack, which
852 /// automatically adjusts the stack pointer. Then adjust the stack pointer to
853 /// allocate space for local variables. Also emit the labels used by the
854 /// exception handler to generate the exception handling frames.
855 
856 /*
857   Here's a gist of what gets emitted:
858 
859   ; Establish frame pointer, if needed
860   [if needs FP]
861       push  %rbp
862       .cfi_def_cfa_offset 16
863       .cfi_offset %rbp, -16
864       .seh_pushreg %rbp
865       mov  %rsp, %rbp
866       .cfi_def_cfa_register %rbp
867 
868   ; Spill general-purpose registers
869   [for all callee-saved GPRs]
870       pushq %<reg>
871       [if not needs FP]
872          .cfi_def_cfa_offset (offset from RETADDR)
873       .seh_pushreg %<reg>
874 
875   ; If the required stack alignment > default stack alignment
876   ; rsp needs to be re-aligned.  This creates a "re-alignment gap"
877   ; of unknown size in the stack frame.
878   [if stack needs re-alignment]
879       and  $MASK, %rsp
880 
881   ; Allocate space for locals
882   [if target is Windows and allocated space > 4096 bytes]
883       ; Windows needs special care for allocations larger
884       ; than one page.
885       mov $NNN, %rax
886       call ___chkstk_ms/___chkstk
887       sub  %rax, %rsp
888   [else]
889       sub  $NNN, %rsp
890 
891   [if needs FP]
892       .seh_stackalloc (size of XMM spill slots)
893       .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
894   [else]
895       .seh_stackalloc NNN
896 
897   ; Spill XMMs
898   ; Note that while only the Windows 64 ABI specifies XMMs as callee-preserved,
899   ; they may get spilled on any platform, if the current function
900   ; calls @llvm.eh.unwind.init
901   [if needs FP]
902       [for all callee-saved XMM registers]
903           movaps  %<xmm reg>, -MMM(%rbp)
904       [for all callee-saved XMM registers]
905           .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
906               ; i.e. the offset relative to (%rbp - SEHFrameOffset)
907   [else]
908       [for all callee-saved XMM registers]
909           movaps  %<xmm reg>, KKK(%rsp)
910       [for all callee-saved XMM registers]
911           .seh_savexmm %<xmm reg>, KKK
912 
913   .seh_endprologue
914 
915   [if needs base pointer]
916       mov  %rsp, %rbx
917       [if needs to restore base pointer]
918           mov %rsp, -MMM(%rbp)
919 
920   ; Emit CFI info
921   [if needs FP]
922       [for all callee-saved registers]
923           .cfi_offset %<reg>, (offset from %rbp)
924   [else]
925        .cfi_def_cfa_offset (offset from RETADDR)
926       [for all callee-saved registers]
927           .cfi_offset %<reg>, (offset from %rsp)
928 
929   Notes:
930   - .seh directives are emitted only for Windows 64 ABI
931   - .cv_fpo directives are emitted on win32 when emitting CodeView
932   - .cfi directives are emitted for all other ABIs
933   - for 32-bit code, substitute %e?? registers for %r??
934 */
935 
936 void X86FrameLowering::emitPrologue(MachineFunction &MF,
937                                     MachineBasicBlock &MBB) const {
938   assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
939          "MF used frame lowering for wrong subtarget");
940   MachineBasicBlock::iterator MBBI = MBB.begin();
941   MachineFrameInfo &MFI = MF.getFrameInfo();
942   const Function &Fn = MF.getFunction();
943   MachineModuleInfo &MMI = MF.getMMI();
944   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
945   uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
946   uint64_t StackSize = MFI.getStackSize();    // Number of bytes to allocate.
947   bool IsFunclet = MBB.isEHFuncletEntry();
948   EHPersonality Personality = EHPersonality::Unknown;
949   if (Fn.hasPersonalityFn())
950     Personality = classifyEHPersonality(Fn.getPersonalityFn());
951   bool FnHasClrFunclet =
952       MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
953   bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
954   bool HasFP = hasFP(MF);
955   bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
956   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
957   bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
958   // FIXME: Emit FPO data for EH funclets.
959   bool NeedsWinFPO =
960       !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
961   bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
962   bool NeedsDwarfCFI =
963       !IsWin64Prologue && (MMI.hasDebugInfo() || Fn.needsUnwindTableEntry());
964   unsigned FramePtr = TRI->getFrameRegister(MF);
965   const unsigned MachineFramePtr =
966       STI.isTarget64BitILP32()
967           ? getX86SubSuperRegister(FramePtr, 64) : FramePtr;
968   unsigned BasePtr = TRI->getBaseRegister();
969   bool HasWinCFI = false;
970 
971   // Debug location must be unknown since the first debug location is used
972   // to determine the end of the prologue.
973   DebugLoc DL;
974 
975   // Add RETADDR move area to callee saved frame size.
976   int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
977   if (TailCallReturnAddrDelta && IsWin64Prologue)
978     report_fatal_error("Can't handle guaranteed tail call under win64 yet");
979 
980   if (TailCallReturnAddrDelta < 0)
981     X86FI->setCalleeSavedFrameSize(
982       X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
983 
984   bool UseStackProbe = !STI.getTargetLowering()->getStackProbeSymbolName(MF).empty();
985 
986   // The default stack probe size is 4096 if the function has no
987   // "stack-probe-size" attribute.
988   unsigned StackProbeSize = 4096;
989   if (Fn.hasFnAttribute("stack-probe-size"))
990     Fn.getFnAttribute("stack-probe-size")
991         .getValueAsString()
992         .getAsInteger(0, StackProbeSize);
993 
994   // Re-align the stack on 64-bit if the x86-interrupt calling convention is
995   // used and an error code was pushed, since the x86-64 ABI requires a 16-byte
996   // stack alignment.
997   if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
998       Fn.arg_size() == 2) {
999     StackSize += 8;
1000     MFI.setStackSize(StackSize);
1001     emitSPUpdate(MBB, MBBI, DL, -8, /*InEpilogue=*/false);
1002   }
1003 
1004   // On x86-64, if the Red Zone is not disabled and we are a leaf function
1005   // that uses up to 128 bytes of stack space and has no frame pointer,
1006   // calls, or dynamic allocas, then we do not need to adjust the stack
1007   // pointer (we fit in the Red Zone). We also check that we don't push and
1008   // pop from the stack.
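  // For example, a leaf function with 40 bytes of locals and no callee-saved
  // spills has its StackSize reduced to 0 here; the locals simply live in the
  // 128-byte red zone below %rsp.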
1009   if (Is64Bit && !Fn.hasFnAttribute(Attribute::NoRedZone) &&
1010       !TRI->needsStackRealignment(MF) &&
1011       !MFI.hasVarSizedObjects() &&             // No dynamic alloca.
1012       !MFI.adjustsStack() &&                   // No calls.
1013       !UseStackProbe &&                        // No stack probes.
1014       !IsWin64CC &&                            // Win64 has no Red Zone
1015       !MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop.
1016       !MF.shouldSplitStack()) {                // Regular stack
1017     uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
1018     if (HasFP) MinSize += SlotSize;
1019     X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
1020     StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
1021     MFI.setStackSize(StackSize);
1022   }
1023 
1024   // Insert stack pointer adjustment for later moving of return addr.  Only
1025   // applies to tail call optimized functions where the callee argument stack
1026   // size is bigger than the caller's.
1027   if (TailCallReturnAddrDelta < 0) {
1028     BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
1029                          /*InEpilogue=*/false)
1030         .setMIFlag(MachineInstr::FrameSetup);
1031   }
1032 
1033   // Mapping for machine moves:
1034   //
1035   //   DST: VirtualFP AND
1036   //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
1037   //        ELSE                        => DW_CFA_def_cfa
1038   //
1039   //   SRC: VirtualFP AND
1040   //        DST: Register               => DW_CFA_def_cfa_register
1041   //
1042   //   ELSE
1043   //        OFFSET < 0                  => DW_CFA_offset_extended_sf
1044   //        REG < 64                    => DW_CFA_offset + Reg
1045   //        ELSE                        => DW_CFA_offset_extended
1046 
1047   uint64_t NumBytes = 0;
1048   int stackGrowth = -SlotSize;
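  // stackGrowth is negative because the stack grows down: each push moves the
  // stack pointer another SlotSize bytes below the CFA.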
1049 
1050   // Find the funclet establisher parameter
1051   unsigned Establisher = X86::NoRegister;
1052   if (IsClrFunclet)
1053     Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
1054   else if (IsFunclet)
1055     Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;
1056 
1057   if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
1058     // Immediately spill establisher into the home slot.
1059     // The runtime cares about this.
1060     // MOV64mr %rdx, 16(%rsp)
1061     unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1062     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
1063         .addReg(Establisher)
1064         .setMIFlag(MachineInstr::FrameSetup);
1065     MBB.addLiveIn(Establisher);
1066   }
1067 
1068   if (HasFP) {
1069     assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved");
1070 
1071     // Calculate required stack adjustment.
1072     uint64_t FrameSize = StackSize - SlotSize;
1073     // If required, include space for an extra hidden slot for stashing the base pointer.
1074     if (X86FI->getRestoreBasePointer())
1075       FrameSize += SlotSize;
1076 
1077     NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
1078 
1079     // Callee-saved registers are pushed on stack before the stack is realigned.
1080     if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
1081       NumBytes = alignTo(NumBytes, MaxAlign);
1082 
1083     // Get the offset of the stack slot for the EBP register, which is
1084     // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
1085     // Update the frame offset adjustment.
1086     if (!IsFunclet)
1087       MFI.setOffsetAdjustment(-NumBytes);
1088     else
1089       assert(MFI.getOffsetAdjustment() == -(int)NumBytes &&
1090              "should calculate same local variable offset for funclets");
1091 
1092     // Save EBP/RBP into the appropriate stack slot.
1093     BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
1094       .addReg(MachineFramePtr, RegState::Kill)
1095       .setMIFlag(MachineInstr::FrameSetup);
1096 
1097     if (NeedsDwarfCFI) {
1098       // Mark the place where EBP/RBP was saved.
1099       // Define the current CFA rule to use the provided offset.
1100       assert(StackSize);
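      // After the return address and the FP push, the CFA is 2 * SlotSize
      // bytes above the stack pointer (e.g. ".cfi_def_cfa_offset 16" on
      // x86-64, as in the prologue gist above).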
1101       BuildCFI(MBB, MBBI, DL,
1102                MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
1103 
1104       // Change the rule for the FramePtr to be an "offset" rule.
1105       unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1106       BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
1107                                   nullptr, DwarfFramePtr, 2 * stackGrowth));
1108     }
1109 
1110     if (NeedsWinCFI) {
1111       HasWinCFI = true;
1112       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1113           .addImm(FramePtr)
1114           .setMIFlag(MachineInstr::FrameSetup);
1115     }
1116 
1117     if (!IsWin64Prologue && !IsFunclet) {
1118       // Update EBP with the new base value.
1119       BuildMI(MBB, MBBI, DL,
1120               TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
1121               FramePtr)
1122           .addReg(StackPtr)
1123           .setMIFlag(MachineInstr::FrameSetup);
1124 
1125       if (NeedsDwarfCFI) {
1126         // Mark effective beginning of when frame pointer becomes valid.
1127         // Define the current CFA to use the EBP/RBP register.
1128         unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1129         BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaRegister(
1130                                     nullptr, DwarfFramePtr));
1131       }
1132 
1133       if (NeedsWinFPO) {
1134         // .cv_fpo_setframe $FramePtr
1135         HasWinCFI = true;
1136         BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
1137             .addImm(FramePtr)
1138             .addImm(0)
1139             .setMIFlag(MachineInstr::FrameSetup);
1140       }
1141     }
1142   } else {
1143     assert(!IsFunclet && "funclets without FPs not yet implemented");
1144     NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
1145   }
1146 
1147   // For EH funclets, only allocate enough space for outgoing calls. Save the
1148   // NumBytes value that we would've used for the parent frame.
1149   unsigned ParentFrameNumBytes = NumBytes;
1150   if (IsFunclet)
1151     NumBytes = getWinEHFuncletFrameSize(MF);
1152 
1153   // Skip the callee-saved push instructions.
1154   bool PushedRegs = false;
1155   int StackOffset = 2 * stackGrowth;
1156 
1157   while (MBBI != MBB.end() &&
1158          MBBI->getFlag(MachineInstr::FrameSetup) &&
1159          (MBBI->getOpcode() == X86::PUSH32r ||
1160           MBBI->getOpcode() == X86::PUSH64r)) {
1161     PushedRegs = true;
1162     unsigned Reg = MBBI->getOperand(0).getReg();
1163     ++MBBI;
1164 
1165     if (!HasFP && NeedsDwarfCFI) {
1166       // Mark callee-saved push instruction.
1167       // Define the current CFA rule to use the provided offset.
1168       assert(StackSize);
1169       BuildCFI(MBB, MBBI, DL,
1170                MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
1171       StackOffset += stackGrowth;
1172     }
1173 
1174     if (NeedsWinCFI) {
1175       HasWinCFI = true;
1176       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1177           .addImm(Reg)
1178           .setMIFlag(MachineInstr::FrameSetup);
1179     }
1180   }
1181 
1182   // Realign stack after we pushed callee-saved registers (so that we'll be
1183   // able to calculate their offsets from the frame pointer).
1184   // Don't do this for Win64; it needs to realign the stack after the prologue.
1185   if (!IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF)) {
1186     assert(HasFP && "There should be a frame pointer if stack is realigned.");
1187     BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
1188   }
1189 
1190   // If there is an SUB32ri of ESP immediately before this instruction, merge
1191   // the two. This can be the case when tail call elimination is enabled and
1192   // the callee has more arguments than the caller.
1193   NumBytes -= mergeSPUpdates(MBB, MBBI, true);
1194 
1195   // Adjust stack pointer: ESP -= numbytes.
1196 
1197   // Windows and cygwin/mingw require a prologue helper routine when allocating
1198   // more than 4K bytes on the stack.  Windows uses __chkstk and cygwin/mingw
1199   // uses __alloca.  __alloca and the 32-bit version of __chkstk will probe the
1200   // stack and adjust the stack pointer in one go.  The 64-bit version of
1201   // __chkstk is only responsible for probing the stack.  The 64-bit prologue is
1202   // responsible for adjusting the stack pointer.  Touching the stack at 4K
1203   // increments is necessary to ensure that the guard pages used by the OS
1204   // virtual memory manager are allocated in correct sequence.
1205   uint64_t AlignedNumBytes = NumBytes;
1206   if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
1207     AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
1208   if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
1209     assert(!X86FI->getUsesRedZone() &&
1210            "The Red Zone is not accounted for in stack probes");
1211 
1212     // Check whether EAX is livein for this block.
1213     bool isEAXAlive = isEAXLiveIn(MBB);
1214 
1215     if (isEAXAlive) {
1216       if (Is64Bit) {
1217         // Save RAX
1218         BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
1219           .addReg(X86::RAX, RegState::Kill)
1220           .setMIFlag(MachineInstr::FrameSetup);
1221       } else {
1222         // Save EAX
1223         BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
1224           .addReg(X86::EAX, RegState::Kill)
1225           .setMIFlag(MachineInstr::FrameSetup);
1226       }
1227     }
1228 
1229     if (Is64Bit) {
1230       // Handle the 64-bit Windows ABI case where we need to call __chkstk.
1231       // Function prologue is responsible for adjusting the stack pointer.
1232       int Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
1233       if (isUInt<32>(Alloc)) {
1234         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1235             .addImm(Alloc)
1236             .setMIFlag(MachineInstr::FrameSetup);
1237       } else if (isInt<32>(Alloc)) {
1238         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
1239             .addImm(Alloc)
1240             .setMIFlag(MachineInstr::FrameSetup);
1241       } else {
1242         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
1243             .addImm(Alloc)
1244             .setMIFlag(MachineInstr::FrameSetup);
1245       }
1246     } else {
1247       // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
1248       // We'll also use 4 already allocated bytes for EAX.
1249       BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1250           .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
1251           .setMIFlag(MachineInstr::FrameSetup);
1252     }
1253 
1254     // Call __chkstk, __chkstk_ms, or __alloca.
1255     emitStackProbe(MF, MBB, MBBI, DL, true);
1256 
1257     if (isEAXAlive) {
1258       // Restore RAX/EAX
1259       MachineInstr *MI;
1260       if (Is64Bit)
1261         MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV64rm), X86::RAX),
1262                           StackPtr, false, NumBytes - 8);
1263       else
1264         MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX),
1265                           StackPtr, false, NumBytes - 4);
1266       MI->setFlag(MachineInstr::FrameSetup);
1267       MBB.insert(MBBI, MI);
1268     }
1269   } else if (NumBytes) {
1270     emitSPUpdate(MBB, MBBI, DL, -(int64_t)NumBytes, /*InEpilogue=*/false);
1271   }
1272 
1273   if (NeedsWinCFI && NumBytes) {
1274     HasWinCFI = true;
1275     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
1276         .addImm(NumBytes)
1277         .setMIFlag(MachineInstr::FrameSetup);
1278   }
1279 
1280   int SEHFrameOffset = 0;
1281   unsigned SPOrEstablisher;
1282   if (IsFunclet) {
1283     if (IsClrFunclet) {
1284       // The establisher parameter passed to a CLR funclet is actually a pointer
1285       // to the (mostly empty) frame of its nearest enclosing funclet; we have
1286       // to find the root function establisher frame by loading the PSPSym from
1287       // the intermediate frame.
1288       unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
1289       MachinePointerInfo NoInfo;
1290       MBB.addLiveIn(Establisher);
1291       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher),
1292                    Establisher, false, PSPSlotOffset)
1293           .addMemOperand(MF.getMachineMemOperand(
1294               NoInfo, MachineMemOperand::MOLoad, SlotSize, SlotSize));
1296       // Save the root establisher back into the current funclet's (mostly
1297       // empty) frame, in case a sub-funclet or the GC needs it.
1298       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
1299                    false, PSPSlotOffset)
1300           .addReg(Establisher)
1301           .addMemOperand(
1302               MF.getMachineMemOperand(NoInfo, MachineMemOperand::MOStore |
1303                                                   MachineMemOperand::MOVolatile,
1304                                       SlotSize, SlotSize));
1305     }
1306     SPOrEstablisher = Establisher;
1307   } else {
1308     SPOrEstablisher = StackPtr;
1309   }
1310 
1311   if (IsWin64Prologue && HasFP) {
1312     // Set RBP to a small fixed offset from RSP. In the funclet case, we base
1313     // this calculation on the incoming establisher, which holds the value of
1314     // RSP from the parent frame at the end of the prologue.
1315     SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
1316     if (SEHFrameOffset)
1317       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
1318                    SPOrEstablisher, false, SEHFrameOffset);
1319     else
1320       BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
1321           .addReg(SPOrEstablisher);
1322 
1323     // If this is not a funclet, emit the CFI describing our frame pointer.
1324     if (NeedsWinCFI && !IsFunclet) {
1325       assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
1326       HasWinCFI = true;
1327       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
1328           .addImm(FramePtr)
1329           .addImm(SEHFrameOffset)
1330           .setMIFlag(MachineInstr::FrameSetup);
1331       if (isAsynchronousEHPersonality(Personality))
1332         MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset;
1333     }
1334   } else if (IsFunclet && STI.is32Bit()) {
1335     // Reset EBP / ESI to something good for funclets.
1336     MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL);
1337     // If we're a catch funclet, we can be returned to via catchret. Save ESP
1338     // into the registration node so that the runtime will restore it for us.
1339     if (!MBB.isCleanupFuncletEntry()) {
1340       assert(Personality == EHPersonality::MSVC_CXX);
1341       unsigned FrameReg;
1342       int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex;
1343       int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg);
1344       // ESP is the first field, so no extra displacement is needed.
1345       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg,
1346                    false, EHRegOffset)
1347           .addReg(X86::ESP);
1348     }
1349   }
1350 
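  // Skip past the remaining FrameSetup instructions (e.g. callee-saved XMM
  // spills); when emitting Win64 unwind info, describe each XMM spill to a
  // stack slot with an SEH_SaveXMM directive.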
1351   while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
1352     const MachineInstr &FrameInstr = *MBBI;
1353     ++MBBI;
1354 
1355     if (NeedsWinCFI) {
1356       int FI;
1357       if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
1358         if (X86::FR64RegClass.contains(Reg)) {
1359           unsigned IgnoredFrameReg;
1360           int Offset = getFrameIndexReference(MF, FI, IgnoredFrameReg);
1361           Offset += SEHFrameOffset;
1362 
1363           HasWinCFI = true;
1364           assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");
1365           BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
1366               .addImm(Reg)
1367               .addImm(Offset)
1368               .setMIFlag(MachineInstr::FrameSetup);
1369         }
1370       }
1371     }
1372   }
1373 
1374   if (NeedsWinCFI && HasWinCFI)
1375     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
1376         .setMIFlag(MachineInstr::FrameSetup);
1377 
1378   if (FnHasClrFunclet && !IsFunclet) {
1379     // Save the so-called Initial-SP (i.e. the value of the stack pointer
1380     // immediately after the prolog)  into the PSPSlot so that funclets
1381     // and the GC can recover it.
1382     unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
1383     auto PSPInfo = MachinePointerInfo::getFixedStack(
1384         MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx);
1385     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false,
1386                  PSPSlotOffset)
1387         .addReg(StackPtr)
1388         .addMemOperand(MF.getMachineMemOperand(
1389             PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
1390             SlotSize, SlotSize));
1391   }
1392 
1393   // Realign stack after we spilled callee-saved registers (so that we'll be
1394   // able to calculate their offsets from the frame pointer).
1395   // Win64 requires aligning the stack after the prologue.
1396   if (IsWin64Prologue && TRI->needsStackRealignment(MF)) {
1397     assert(HasFP && "There should be a frame pointer if stack is realigned.");
1398     BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
1399   }
1400 
1401   // We already dealt with stack realignment and funclets above.
1402   if (IsFunclet && STI.is32Bit())
1403     return;
1404 
1405   // If we need a base pointer, set it up here. It's whatever the value
1406   // of the stack pointer is at this point. Any variable size objects
1407   // will be allocated after this, so we can still use the base pointer
1408   // to reference locals.
1409   if (TRI->hasBasePointer(MF)) {
1410     // Update the base pointer with the current stack pointer.
1411     unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
1412     BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
1413       .addReg(SPOrEstablisher)
1414       .setMIFlag(MachineInstr::FrameSetup);
1415     if (X86FI->getRestoreBasePointer()) {
1416       // Stash value of base pointer.  Saving RSP instead of EBP shortens
1417       // dependence chain. Used by SjLj EH.
1418       unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1419       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
1420                    FramePtr, true, X86FI->getRestoreBasePointerOffset())
1421         .addReg(SPOrEstablisher)
1422         .setMIFlag(MachineInstr::FrameSetup);
1423     }
1424 
1425     if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
1426       // Stash the value of the frame pointer relative to the base pointer for
1427       // Win32 EH. This supports Win32 EH, which does the inverse of the above:
1428       // it recovers the frame pointer from the base pointer rather than the
1429       // other way around.
1430       unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1431       unsigned UsedReg;
1432       int Offset =
1433           getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
1434       assert(UsedReg == BasePtr);
1435       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
1436           .addReg(FramePtr)
1437           .setMIFlag(MachineInstr::FrameSetup);
1438     }
1439   }
1440 
1441   if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
1442     // Mark end of stack pointer adjustment.
1443     if (!HasFP && NumBytes) {
1444       // Define the current CFA rule to use the provided offset.
1445       assert(StackSize);
1446       BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset(
1447                                   nullptr, -StackSize + stackGrowth));
1448     }
1449 
1450     // Emit DWARF info specifying the offsets of the callee-saved registers.
1451     emitCalleeSavedFrameMoves(MBB, MBBI, DL);
1452   }
1453 
1454   // An X86 interrupt handling function cannot assume anything about the
1455   // direction flag (DF in the EFLAGS register). Clear this flag by emitting a
1456   // "cld" instruction in the prologue of each interrupt handler function.
1457   //
1458   // FIXME: Create "cld" instruction only in these cases:
1459   // 1. The interrupt handling function uses any of the "rep" instructions.
1460   // 2. Interrupt handling function calls another function.
1461   //
1462   if (Fn.getCallingConv() == CallingConv::X86_INTR)
1463     BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
1464         .setMIFlag(MachineInstr::FrameSetup);
1465 
1466   // At this point we know if the function has WinCFI or not.
1467   MF.setHasWinCFI(HasWinCFI);
1468 }
1469 
1470 bool X86FrameLowering::canUseLEAForSPInEpilogue(
1471     const MachineFunction &MF) const {
1472   // We can't use LEA instructions for adjusting the stack pointer if we don't
1473   // have a frame pointer in the Win64 ABI.  Only ADD instructions may be used
1474   // to deallocate the stack.
1475   // This means that we can use LEA for SP in two situations:
1476   // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
1477   // 2. We *have* a frame pointer which means we are permitted to use LEA.
1478   return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
1479 }
1480 
1481 static bool isFuncletReturnInstr(MachineInstr &MI) {
1482   switch (MI.getOpcode()) {
1483   case X86::CATCHRET:
1484   case X86::CLEANUPRET:
1485     return true;
1486   default:
1487     return false;
1488   }
1489   llvm_unreachable("impossible");
1490 }
1491 
1492 // CLR funclets use a special "Previous Stack Pointer Symbol" slot on the
1493 // stack. It holds a pointer to the bottom of the root function frame.  The
1494 // establisher frame pointer passed to a nested funclet may point to the
1495 // (mostly empty) frame of its parent funclet, but it will need to find
1496 // the frame of the root function to access locals.  To facilitate this,
1497 // every funclet copies the pointer to the bottom of the root function
1498 // frame into a PSPSym slot in its own (mostly empty) stack frame. Using the
1499 // same offset for the PSPSym in the root function frame that's used in the
1500 // funclets' frames allows each funclet to dynamically accept any ancestor
1501 // frame as its establisher argument (the runtime doesn't guarantee the
1502 // immediate parent for some reason lost to history), and also allows the GC,
1503 // which uses the PSPSym for some bookkeeping, to find it in any funclet's
1504 // frame with only a single offset reported for the entire method.
1505 unsigned
1506 X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
1507   const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
1508   unsigned SPReg;
1509   int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
1510                                               /*IgnoreSPUpdates*/ true);
1511   assert(Offset >= 0 && SPReg == TRI->getStackRegister());
1512   return static_cast<unsigned>(Offset);
1513 }
1514 
1515 unsigned
1516 X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
1517   // This is the size of the pushed CSRs.
1518   unsigned CSSize =
1519       MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
1520   // This is the amount of stack a funclet needs to allocate.
1521   unsigned UsedSize;
1522   EHPersonality Personality =
1523       classifyEHPersonality(MF.getFunction().getPersonalityFn());
1524   if (Personality == EHPersonality::CoreCLR) {
1525     // CLR funclets need to hold enough space to include the PSPSym, at the
1526     // same offset from the stack pointer (immediately after the prolog) as it
1527     // resides at in the main function.
1528     UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
1529   } else {
1530     // Other funclets just need enough stack for outgoing call arguments.
1531     UsedSize = MF.getFrameInfo().getMaxCallFrameSize();
1532   }
1533   // RBP is not included in the callee saved register block. After pushing RBP,
1534   // everything is 16 byte aligned. Everything we allocate before an outgoing
1535   // call must also be 16 byte aligned.
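  // Illustrative example (assuming 16-byte stack alignment): with CSSize = 40
  // and UsedSize = 32, alignTo(72, 16) = 80, so the funclet itself allocates
  // 80 - 40 = 40 bytes.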
1536   unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlignment());
1537   // Subtract out the size of the callee saved registers. This is how much stack
1538   // each funclet will allocate.
1539   return FrameSizeMinusRBP - CSSize;
1540 }
1541 
1542 static bool isTailCallOpcode(unsigned Opc) {
1543   return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
1544          Opc == X86::TCRETURNmi ||
1545          Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 ||
1546          Opc == X86::TCRETURNmi64;
1547 }
1548 
1549 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
1550                                     MachineBasicBlock &MBB) const {
1551   const MachineFrameInfo &MFI = MF.getFrameInfo();
1552   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1553   MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator();
1554   MachineBasicBlock::iterator MBBI = Terminator;
1555   DebugLoc DL;
1556   if (MBBI != MBB.end())
1557     DL = MBBI->getDebugLoc();
1558   // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
1559   const bool Is64BitILP32 = STI.isTarget64BitILP32();
1560   unsigned FramePtr = TRI->getFrameRegister(MF);
1561   unsigned MachineFramePtr =
1562       Is64BitILP32 ? getX86SubSuperRegister(FramePtr, 64) : FramePtr;
1563 
1564   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1565   bool NeedsWin64CFI =
1566       IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
1567   bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);
1568 
1569   // Get the number of bytes to allocate from the FrameInfo.
1570   uint64_t StackSize = MFI.getStackSize();
1571   uint64_t MaxAlign = calculateMaxStackAlign(MF);
1572   unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1573   bool HasFP = hasFP(MF);
1574   uint64_t NumBytes = 0;
1575 
1576   if (IsFunclet) {
1577     assert(HasFP && "EH funclets without FP not yet implemented");
1578     NumBytes = getWinEHFuncletFrameSize(MF);
1579   } else if (HasFP) {
1580     // Calculate required stack adjustment.
1581     uint64_t FrameSize = StackSize - SlotSize;
1582     NumBytes = FrameSize - CSSize;
1583 
1584     // Callee-saved registers were pushed on stack before the stack was
1585     // realigned.
1586     if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
1587       NumBytes = alignTo(FrameSize, MaxAlign);
1588   } else {
1589     NumBytes = StackSize - CSSize;
1590   }
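  // Remember the allocation size before any SP-update merging; the Win64
  // epilogue path recomputes the SEH frame offset from this amount.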
1591   uint64_t SEHStackAllocAmt = NumBytes;
1592 
1593   if (HasFP) {
1594     // Pop EBP.
1595     BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
1596             MachineFramePtr)
1597         .setMIFlag(MachineInstr::FrameDestroy);
1598   }
1599 
1600   MachineBasicBlock::iterator FirstCSPop = MBBI;
1601   // Skip the callee-saved pop instructions.
1602   while (MBBI != MBB.begin()) {
1603     MachineBasicBlock::iterator PI = std::prev(MBBI);
1604     unsigned Opc = PI->getOpcode();
1605 
1606     if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
1607       if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
1608           (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)))
1609         break;
1610       FirstCSPop = PI;
1611     }
1612 
1613     --MBBI;
1614   }
1615   MBBI = FirstCSPop;
1616 
1617   if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
1618     emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);
1619 
1620   if (MBBI != MBB.end())
1621     DL = MBBI->getDebugLoc();
1622 
1623   // If there is an ADD32ri or SUB32ri of ESP immediately before this
1624   // instruction, merge the two instructions.
1625   if (NumBytes || MFI.hasVarSizedObjects())
1626     NumBytes += mergeSPUpdates(MBB, MBBI, true);
1627 
1628   // If dynamic allocas are used, reset ESP to point to the last callee-saved
1629   // slot before popping them off. The same applies when the stack was
1630   // realigned. Don't do this if this was a funclet epilogue, since the funclets
1631   // will not do realignment or dynamic stack allocation.
1632   if ((TRI->needsStackRealignment(MF) || MFI.hasVarSizedObjects()) &&
1633       !IsFunclet) {
1634     if (TRI->needsStackRealignment(MF))
1635       MBBI = FirstCSPop;
1636     unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
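    // LEAAmount is relative to the frame pointer: on Win64 it re-derives the
    // SP value from before the allocation; otherwise -CSSize points SP at the
    // last pushed callee-saved register so the POPs that follow restore them.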
1637     uint64_t LEAAmount =
1638         IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
1639 
1640     // There are only two legal forms of epilogue:
1641     // - add SEHAllocationSize, %rsp
1642     // - lea SEHAllocationSize(%FramePtr), %rsp
1643     //
1644     // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
1645     // However, we may use this sequence if we have a frame pointer because the
1646     // effects of the prologue can safely be undone.
1647     if (LEAAmount != 0) {
1648       unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
1649       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
1650                    FramePtr, false, LEAAmount);
1651       --MBBI;
1652     } else {
1653       unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
1654       BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
1655         .addReg(FramePtr);
1656       --MBBI;
1657     }
1658   } else if (NumBytes) {
1659     // Adjust stack pointer back: ESP += numbytes.
1660     emitSPUpdate(MBB, MBBI, DL, NumBytes, /*InEpilogue=*/true);
1661     --MBBI;
1662   }
1663 
1664   // The Windows unwinder will not invoke a function's exception handler if the
1665   // IP is in either the prologue or the epilogue.  This behavior causes a
1666   // problem when a call immediately precedes an epilogue, because the return
1667   // address points into the epilogue.  To cope with that, we insert an
1668   // epilogue marker here, then replace it with a 'nop' if it ends up
1669   // immediately after a CALL in the final emitted code.
1670   if (NeedsWin64CFI && MF.hasWinCFI())
1671     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
1672 
1673   if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) {
1674     // Add the return addr area delta back since we are not tail calling.
1675     int Offset = -1 * X86FI->getTCReturnAddrDelta();
1676     assert(Offset >= 0 && "TCDelta should never be positive");
1677     if (Offset) {
1678       // Check for possible merge with preceding ADD instruction.
1679       Offset += mergeSPUpdates(MBB, Terminator, true);
1680       emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true);
1681     }
1682   }
1683 }
1684 
1685 int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
1686                                              unsigned &FrameReg) const {
1687   const MachineFrameInfo &MFI = MF.getFrameInfo();
1688 
1689   bool IsFixed = MFI.isFixedObjectIndex(FI);
1690   // We can't calculate offset from frame pointer if the stack is realigned,
1691   // so enforce usage of stack/base pointer.  The base pointer is used when we
1692   // have dynamic allocas in addition to dynamic realignment.
1693   if (TRI->hasBasePointer(MF))
1694     FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
1695   else if (TRI->needsStackRealignment(MF))
1696     FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
1697   else
1698     FrameReg = TRI->getFrameRegister(MF);
1699 
1700   // Offset will hold the offset from the stack pointer at function entry to the
1701   // object.
1702   // We need to factor in additional offsets applied during the prologue to the
1703   // frame, base, and stack pointer depending on which is used.
1704   int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
1705   const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1706   unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1707   uint64_t StackSize = MFI.getStackSize();
1708   bool HasFP = hasFP(MF);
1709   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1710   int64_t FPDelta = 0;
1711 
1712   if (IsWin64Prologue) {
1713     assert(!MFI.hasCalls() || (StackSize % 16) == 8);
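    // The pushed return address already misaligns RSP by 8, so keeping call
    // sites 16-byte aligned forces the total frame size to be 8 mod 16.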
1714 
1715     // Calculate required stack adjustment.
1716     uint64_t FrameSize = StackSize - SlotSize;
1717     // If required, include space for extra hidden slot for stashing base pointer.
1718     if (X86FI->getRestoreBasePointer())
1719       FrameSize += SlotSize;
1720     uint64_t NumBytes = FrameSize - CSSize;
1721 
1722     uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
1723     if (FI && FI == X86FI->getFAIndex())
1724       return -SEHFrameOffset;
1725 
1726     // FPDelta is the offset between the "traditional" FP location (old base
1727     // pointer followed by the return address) and the location required by the
1728     // restricted Win64 prologue.
1729     // Add FPDelta to all offsets below that go through the frame pointer.
1730     FPDelta = FrameSize - SEHFrameOffset;
1731     assert((!MFI.hasCalls() || (FPDelta % 16) == 0) &&
1732            "FPDelta isn't aligned per the Win64 ABI!");
1733   }
1734 
1735 
1736   if (TRI->hasBasePointer(MF)) {
1737     assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
1738     if (FI < 0) {
1739       // Skip the saved EBP.
1740       return Offset + SlotSize + FPDelta;
1741     } else {
1742       assert((-(Offset + StackSize)) % MFI.getObjectAlignment(FI) == 0);
1743       return Offset + StackSize;
1744     }
1745   } else if (TRI->needsStackRealignment(MF)) {
1746     if (FI < 0) {
1747       // Skip the saved EBP.
1748       return Offset + SlotSize + FPDelta;
1749     } else {
1750       assert((-(Offset + StackSize)) % MFI.getObjectAlignment(FI) == 0);
1751       return Offset + StackSize;
1752     }
1753     // FIXME: Support tail calls
1754   } else {
1755     if (!HasFP)
1756       return Offset + StackSize;
1757 
1758     // Skip the saved EBP.
1759     Offset += SlotSize;
1760 
1761     // Skip the RETADDR move area
1762     int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1763     if (TailCallReturnAddrDelta < 0)
1764       Offset -= TailCallReturnAddrDelta;
1765   }
1766 
1767   return Offset + FPDelta;
1768 }
1769 
1770 int X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF,
1771                                                int FI, unsigned &FrameReg,
1772                                                int Adjustment) const {
1773   const MachineFrameInfo &MFI = MF.getFrameInfo();
1774   FrameReg = TRI->getStackRegister();
1775   return MFI.getObjectOffset(FI) - getOffsetOfLocalArea() + Adjustment;
1776 }
1777 
1778 int
1779 X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
1780                                                  int FI, unsigned &FrameReg,
1781                                                  bool IgnoreSPUpdates) const {
1782 
1783   const MachineFrameInfo &MFI = MF.getFrameInfo();
1784   // Does not include any dynamic realign.
1785   const uint64_t StackSize = MFI.getStackSize();
1786   // LLVM arranges the stack as follows:
1787   //   ...
1788   //   ARG2
1789   //   ARG1
1790   //   RETADDR
1791   //   PUSH RBP   <-- RBP points here
1792   //   PUSH CSRs
1793   //   ~~~~~~~    <-- possible stack realignment (non-win64)
1794   //   ...
1795   //   STACK OBJECTS
1796   //   ...        <-- RSP after prologue points here
1797   //   ~~~~~~~    <-- possible stack realignment (win64)
1798   //
1799   // if (hasVarSizedObjects()):
1800   //   ...        <-- "base pointer" (ESI/RBX) points here
1801   //   DYNAMIC ALLOCAS
1802   //   ...        <-- RSP points here
1803   //
1804   // Case 1: In the simple case of no stack realignment and no dynamic
1805   // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
1806   // with fixed offsets from RSP.
1807   //
1808   // Case 2: In the case of stack realignment with no dynamic allocas, fixed
1809   // stack objects are addressed with RBP and regular stack objects with RSP.
1810   //
1811   // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
1812   // to address stack arguments for outgoing calls and nothing else. The "base
1813   // pointer" points to local variables, and RBP points to fixed objects.
1814   //
1815   // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
1816   // answer we give is relative to the SP after the prologue, and not the
1817   // SP in the middle of the function.
1818 
1819   if (MFI.isFixedObjectIndex(FI) && TRI->needsStackRealignment(MF) &&
1820       !STI.isTargetWin64())
1821     return getFrameIndexReference(MF, FI, FrameReg);
1822 
1823   // If !hasReservedCallFrame the function might have SP adjustment in the
1824   // body.  So, even though the offset is statically known, it depends on where
1825   // we are in the function.
1826   const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
1827   if (!IgnoreSPUpdates && !TFI->hasReservedCallFrame(MF))
1828     return getFrameIndexReference(MF, FI, FrameReg);
1829 
1830   // We don't handle tail calls, and shouldn't be seeing them either.
1831   assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
1832          "we don't handle this case!");
1833 
1834   // This is how the math works out:
1835   //
1836   //  %rsp grows (i.e. gets lower) left to right. Each box below is
1837   //  one word (eight bytes).  Obj0 is the stack slot we're trying to
1838   //  get to.
1839   //
1840   //    ----------------------------------
1841   //    | BP | Obj0 | Obj1 | ... | ObjN |
1842   //    ----------------------------------
1843   //    ^    ^      ^                   ^
1844   //    A    B      C                   E
1845   //
1846   // A is the incoming stack pointer.
1847   // (B - A) is the local area offset (-8 for x86-64) [1]
1848   // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
1849   //
1850   // |(E - B)| is the StackSize (absolute value, positive).  For a
1851   // stack that grows down, this works out to be (B - E). [3]
1852   //
1853   // E is also the value of %rsp after stack has been set up, and we
1854   // want (C - E) -- the value we can add to %rsp to get to Obj0.  Now
1855   // (C - E) == (C - A) - (B - A) + (B - E)
1856   //            { Using [1], [2] and [3] above }
1857   //         == getObjectOffset - LocalAreaOffset + StackSize
1858 
1859   return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize);
1860 }
1861 
1862 bool X86FrameLowering::assignCalleeSavedSpillSlots(
1863     MachineFunction &MF, const TargetRegisterInfo *TRI,
1864     std::vector<CalleeSavedInfo> &CSI) const {
1865   MachineFrameInfo &MFI = MF.getFrameInfo();
1866   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1867 
1868   unsigned CalleeSavedFrameSize = 0;
1869   int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
1870 
1871   int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1872 
1873   if (TailCallReturnAddrDelta < 0) {
1874     // create RETURNADDR area
1875     //   arg
1876     //   arg
1877     //   RETADDR
1878     //   { ...
1879     //     RETADDR area
1880     //     ...
1881     //   }
1882     //   [EBP]
1883     MFI.CreateFixedObject(-TailCallReturnAddrDelta,
1884                            TailCallReturnAddrDelta - SlotSize, true);
1885   }
1886 
1887   // Spill the BasePtr if it's used.
1888   if (this->TRI->hasBasePointer(MF)) {
1889     // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
1890     if (MF.hasEHFunclets()) {
1891       int FI = MFI.CreateSpillStackObject(SlotSize, SlotSize);
1892       X86FI->setHasSEHFramePtrSave(true);
1893       X86FI->setSEHFramePtrSaveIndex(FI);
1894     }
1895   }
1896 
1897   if (hasFP(MF)) {
1898     // emitPrologue always spills frame register the first thing.
1899     SpillSlotOffset -= SlotSize;
1900     MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
1901 
1902     // Since emitPrologue and emitEpilogue will handle spilling and restoring of
1903     // the frame register, we can delete it from CSI list and not have to worry
1904     // about avoiding it later.
1905     unsigned FPReg = TRI->getFrameRegister(MF);
1906     for (unsigned i = 0; i < CSI.size(); ++i) {
1907       if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
1908         CSI.erase(CSI.begin() + i);
1909         break;
1910       }
1911     }
1912   }
1913 
1914   // Assign slots for GPRs. It increases frame size.
1915   for (unsigned i = CSI.size(); i != 0; --i) {
1916     unsigned Reg = CSI[i - 1].getReg();
1917 
1918     if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
1919       continue;
1920 
1921     SpillSlotOffset -= SlotSize;
1922     CalleeSavedFrameSize += SlotSize;
1923 
1924     int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
1925     CSI[i - 1].setFrameIdx(SlotIndex);
1926   }
1927 
1928   X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
1929 
1930   // Assign slots for XMMs.
1931   for (unsigned i = CSI.size(); i != 0; --i) {
1932     unsigned Reg = CSI[i - 1].getReg();
1933     if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
1934       continue;
1935 
1936     // If this is k-register make sure we lookup via the largest legal type.
1937     MVT VT = MVT::Other;
1938     if (X86::VK16RegClass.contains(Reg))
1939       VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
1940 
1941     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
1942     unsigned Size = TRI->getSpillSize(*RC);
1943     unsigned Align = TRI->getSpillAlignment(*RC);
1944     // ensure alignment
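    // (SpillSlotOffset is negative here, so this rounds it further away from
    // zero until it is a multiple of Align.)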
1945     SpillSlotOffset -= std::abs(SpillSlotOffset) % Align;
1946     // spill into slot
1947     SpillSlotOffset -= Size;
1948     int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
1949     CSI[i - 1].setFrameIdx(SlotIndex);
1950     MFI.ensureMaxAlignment(Align);
1951   }
1952 
1953   return true;
1954 }
1955 
1956 bool X86FrameLowering::spillCalleeSavedRegisters(
1957     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
1958     const std::vector<CalleeSavedInfo> &CSI,
1959     const TargetRegisterInfo *TRI) const {
1960   DebugLoc DL = MBB.findDebugLoc(MI);
1961 
1962   // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
1963   // for us, and there are no XMM CSRs on Win32.
1964   if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
1965     return true;
1966 
1967   // Push GPRs. It increases frame size.
1968   const MachineFunction &MF = *MBB.getParent();
1969   unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
1970   for (unsigned i = CSI.size(); i != 0; --i) {
1971     unsigned Reg = CSI[i - 1].getReg();
1972 
1973     if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
1974       continue;
1975 
1976     const MachineRegisterInfo &MRI = MF.getRegInfo();
1977     bool isLiveIn = MRI.isLiveIn(Reg);
1978     if (!isLiveIn)
1979       MBB.addLiveIn(Reg);
1980 
1981     // Decide whether we can add a kill flag to the use.
1982     bool CanKill = !isLiveIn;
1983     // Check if any subregister is live-in
1984     if (CanKill) {
1985       for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
1986         if (MRI.isLiveIn(*AReg)) {
1987           CanKill = false;
1988           break;
1989         }
1990       }
1991     }
1992 
1993     // Do not set a kill flag on values that are also marked as live-in. This
1994     // happens with the @llvm.returnaddress intrinsic and with arguments
1995     // passed in callee saved registers.
1996     // Omitting the kill flags is conservatively correct even if the live-in
1997     // is not used after all.
1998     BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
1999       .setMIFlag(MachineInstr::FrameSetup);
2000   }
2001 
2002   // Make XMM regs spilled. X86 does not have ability of push/pop XMM.
2003   // It can be done by spilling XMMs to stack frame.
2004   for (unsigned i = CSI.size(); i != 0; --i) {
2005     unsigned Reg = CSI[i-1].getReg();
2006     if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2007       continue;
2008 
2009     // If this is k-register make sure we lookup via the largest legal type.
2010     MVT VT = MVT::Other;
2011     if (X86::VK16RegClass.contains(Reg))
2012       VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2013 
2014     // Add the callee-saved register as live-in. It's killed at the spill.
2015     MBB.addLiveIn(Reg);
2016     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2017 
2018     TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
2019                             TRI);
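    // storeRegToStackSlot inserted the spill immediately before MI; step back
    // to mark it as FrameSetup, then restore the iterator.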
2020     --MI;
2021     MI->setFlag(MachineInstr::FrameSetup);
2022     ++MI;
2023   }
2024 
2025   return true;
2026 }
2027 
2028 void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB,
2029                                                MachineBasicBlock::iterator MBBI,
2030                                                MachineInstr *CatchRet) const {
2031   // SEH shouldn't use catchret.
2032   assert(!isAsynchronousEHPersonality(classifyEHPersonality(
2033              MBB.getParent()->getFunction().getPersonalityFn())) &&
2034          "SEH should not use CATCHRET");
2035   DebugLoc DL = CatchRet->getDebugLoc();
2036   MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();
2037 
2038   // Fill EAX/RAX with the address of the target block.
2039   if (STI.is64Bit()) {
2040     // LEA64r CatchRetTarget(%rip), %rax
2041     BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX)
2042         .addReg(X86::RIP)
2043         .addImm(0)
2044         .addReg(0)
2045         .addMBB(CatchRetTarget)
2046         .addReg(0);
2047   } else {
2048     // MOV32ri $CatchRetTarget, %eax
2049     BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
2050         .addMBB(CatchRetTarget);
2051   }
2052 
2053   // Record that we've taken the address of CatchRetTarget and no longer just
2054   // reference it in a terminator.
2055   CatchRetTarget->setHasAddressTaken();
2056 }
2057 
2058 bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
2059                                                MachineBasicBlock::iterator MI,
2060                                           std::vector<CalleeSavedInfo> &CSI,
2061                                           const TargetRegisterInfo *TRI) const {
2062   if (CSI.empty())
2063     return false;
2064 
2065   if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
2066     // Don't restore CSRs in 32-bit EH funclets. Matches
2067     // spillCalleeSavedRegisters.
2068     if (STI.is32Bit())
2069       return true;
2070     // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
2071     // funclets. emitEpilogue transforms these to normal jumps.
2072     if (MI->getOpcode() == X86::CATCHRET) {
2073       const Function &F = MBB.getParent()->getFunction();
2074       bool IsSEH = isAsynchronousEHPersonality(
2075           classifyEHPersonality(F.getPersonalityFn()));
2076       if (IsSEH)
2077         return true;
2078     }
2079   }
2080 
2081   DebugLoc DL = MBB.findDebugLoc(MI);
2082 
2083   // Reload XMMs from stack frame.
2084   for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
2085     unsigned Reg = CSI[i].getReg();
2086     if (X86::GR64RegClass.contains(Reg) ||
2087         X86::GR32RegClass.contains(Reg))
2088       continue;
2089 
2090     // If this is k-register make sure we lookup via the largest legal type.
2091     MVT VT = MVT::Other;
2092     if (X86::VK16RegClass.contains(Reg))
2093       VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2094 
2095     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2096     TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
2097   }
2098 
2099   // POP GPRs.
2100   unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
2101   for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
2102     unsigned Reg = CSI[i].getReg();
2103     if (!X86::GR64RegClass.contains(Reg) &&
2104         !X86::GR32RegClass.contains(Reg))
2105       continue;
2106 
2107     BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
2108         .setMIFlag(MachineInstr::FrameDestroy);
2109   }
2110   return true;
2111 }
2112 
2113 void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
2114                                             BitVector &SavedRegs,
2115                                             RegScavenger *RS) const {
2116   TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
2117 
2118   // Spill the BasePtr if it's used.
2119   if (TRI->hasBasePointer(MF)){
2120     unsigned BasePtr = TRI->getBaseRegister();
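    // On x32 (64-bit ILP32) the base pointer is a 32-bit register, but the
    // 64-bit super-register is what the 64-bit push/pop spill code operates
    // on, so mark that one as saved.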
2121     if (STI.isTarget64BitILP32())
2122       BasePtr = getX86SubSuperRegister(BasePtr, 64);
2123     SavedRegs.set(BasePtr);
2124   }
2125 }
2126 
2127 static bool
2128 HasNestArgument(const MachineFunction *MF) {
2129   const Function &F = MF->getFunction();
2130   for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
2131        I != E; I++) {
2132     if (I->hasNestAttr())
2133       return true;
2134   }
2135   return false;
2136 }
2137 
2138 /// GetScratchRegister - Get a temp register for performing work in the
2139 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
2140 /// and the properties of the function either one or two registers will be
2141 /// needed. Set primary to true for the first register, false for the second.
2142 static unsigned GetScratchRegister(bool Is64Bit, bool IsLP64,
2143                                    const MachineFunction &MF, bool Primary) {
2144   CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();
2145 
2146   // Erlang stuff.
2147   if (CallingConvention == CallingConv::HiPE) {
2148     if (Is64Bit)
2149       return Primary ? X86::R14 : X86::R13;
2150     else
2151       return Primary ? X86::EBX : X86::EDI;
2152   }
2153 
2154   if (Is64Bit) {
2155     if (IsLP64)
2156       return Primary ? X86::R11 : X86::R12;
2157     else
2158       return Primary ? X86::R11D : X86::R12D;
2159   }
2160 
2161   bool IsNested = HasNestArgument(&MF);
2162 
2163   if (CallingConvention == CallingConv::X86_FastCall ||
2164       CallingConvention == CallingConv::Fast) {
2165     if (IsNested)
2166       report_fatal_error("Segmented stacks do not support fastcall with "
2167                          "nested functions.");
2168     return Primary ? X86::EAX : X86::ECX;
2169   }
2170   if (IsNested)
2171     return Primary ? X86::EDX : X86::EAX;
2172   return Primary ? X86::ECX : X86::EAX;
2173 }
2174 
2175 // The stack limit in the TCB is set to this many bytes above the actual stack
2176 // limit.
2177 static const uint64_t kSplitStackAvailable = 256;
2178 
2179 void X86FrameLowering::adjustForSegmentedStacks(
2180     MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
2181   MachineFrameInfo &MFI = MF.getFrameInfo();
2182   uint64_t StackSize;
2183   unsigned TlsReg, TlsOffset;
2184   DebugLoc DL;
2185 
2186   // To support shrink-wrapping we would need to insert the new blocks
2187   // at the right place and update the branches to PrologueMBB.
2188   assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
2189 
2190   unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2191   assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
2192          "Scratch register is live-in");
2193 
2194   if (MF.getFunction().isVarArg())
2195     report_fatal_error("Segmented stacks do not support vararg functions.");
2196   if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
2197       !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
2198       !STI.isTargetDragonFly())
2199     report_fatal_error("Segmented stacks not supported on this platform.");
2200 
2201   // Eventually StackSize will be calculated by a link-time pass, which will
2202   // also decide whether checking code needs to be injected into this particular
2203   // prologue.
2204   StackSize = MFI.getStackSize();
2205 
2206   // Do not generate a prologue for functions with a stack of size zero
2207   if (StackSize == 0)
2208     return;
2209 
2210   MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
2211   MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
2212   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2213   bool IsNested = false;
2214 
2215   // We need to know if the function has a nest argument only in 64 bit mode.
2216   if (Is64Bit)
2217     IsNested = HasNestArgument(&MF);
2218 
2219   // The MOV R10, RAX needs to be in a different block, since the RET we emit in
2220   // allocMBB needs to be the last (terminating) instruction.
2221 
2222   for (const auto &LI : PrologueMBB.liveins()) {
2223     allocMBB->addLiveIn(LI);
2224     checkMBB->addLiveIn(LI);
2225   }
2226 
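  // R10 carries the static chain ("nest") argument into the prologue, so it
  // must stay live into allocMBB, where it is saved and restored around the
  // __morestack call.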
2227   if (IsNested)
2228     allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
2229 
2230   MF.push_front(allocMBB);
2231   MF.push_front(checkMBB);
2232 
2233   // When the frame size is less than 256 we just compare the stack
2234   // boundary directly to the value of the stack pointer, per gcc.
2235   bool CompareStackPointer = StackSize < kSplitStackAvailable;
2236 
2237   // Read the limit of the current stacklet from the stack_guard location.
2238   if (Is64Bit) {
2239     if (STI.isTargetLinux()) {
2240       TlsReg = X86::FS;
2241       TlsOffset = IsLP64 ? 0x70 : 0x40;
2242     } else if (STI.isTargetDarwin()) {
2243       TlsReg = X86::GS;
2244       TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
2245     } else if (STI.isTargetWin64()) {
2246       TlsReg = X86::GS;
2247       TlsOffset = 0x28; // pvArbitrary, reserved for application use
2248     } else if (STI.isTargetFreeBSD()) {
2249       TlsReg = X86::FS;
2250       TlsOffset = 0x18;
2251     } else if (STI.isTargetDragonFly()) {
2252       TlsReg = X86::FS;
2253       TlsOffset = 0x20; // use tls_tcb.tcb_segstack
2254     } else {
2255       report_fatal_error("Segmented stacks not supported on this platform.");
2256     }
2257 
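    // X86 memory operands are (Base, Scale, Index, Disp, Segment): the LEA
    // below computes ScratchReg = RSP - StackSize, and the CMP reads the
    // stack limit from TlsOffset(%TlsReg).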
2258     if (CompareStackPointer)
2259       ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
2260     else
2261       BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
2262         .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
2263 
2264     BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
2265       .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
2266   } else {
2267     if (STI.isTargetLinux()) {
2268       TlsReg = X86::GS;
2269       TlsOffset = 0x30;
2270     } else if (STI.isTargetDarwin()) {
2271       TlsReg = X86::GS;
2272       TlsOffset = 0x48 + 90*4;
2273     } else if (STI.isTargetWin32()) {
2274       TlsReg = X86::FS;
2275       TlsOffset = 0x14; // pvArbitrary, reserved for application use
2276     } else if (STI.isTargetDragonFly()) {
2277       TlsReg = X86::FS;
2278       TlsOffset = 0x10; // use tls_tcb.tcb_segstack
2279     } else if (STI.isTargetFreeBSD()) {
2280       report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
2281     } else {
2282       report_fatal_error("Segmented stacks not supported on this platform.");
2283     }
2284 
2285     if (CompareStackPointer)
2286       ScratchReg = X86::ESP;
2287     else
2288       BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
2289         .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
2290 
2291     if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
2292         STI.isTargetDragonFly()) {
2293       BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
2294         .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
2295     } else if (STI.isTargetDarwin()) {
2296 
2297       // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
2298       unsigned ScratchReg2;
2299       bool SaveScratch2;
2300       if (CompareStackPointer) {
2301         // The primary scratch register is available for holding the TLS offset.
2302         ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2303         SaveScratch2 = false;
2304       } else {
2305         // Need to use a second register to hold the TLS offset
2306         ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);
2307 
2308         // Unfortunately, with fastcc the second scratch register may hold an
2309         // argument.
2310         SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
2311       }
2312 
2313       // If Scratch2 is live-in then it needs to be saved.
2314       assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
2315              "Scratch register is live-in and not saved");
2316 
2317       if (SaveScratch2)
2318         BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
2319           .addReg(ScratchReg2, RegState::Kill);
2320 
2321       BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
2322         .addImm(TlsOffset);
2323       BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
2324         .addReg(ScratchReg)
2325         .addReg(ScratchReg2).addImm(1).addReg(0)
2326         .addImm(0)
2327         .addReg(TlsReg);
2328 
2329       if (SaveScratch2)
2330         BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
2331     }
2332   }
2333 
2334   // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
2335   // It jumps to normal execution of the function body.
2336   BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&PrologueMBB);
2337 
2338   // On 32 bit we first push the argument size and then the frame size. On 64
2339   // bit, we pass the stack frame size in r10 and the argument size in r11.
2340   if (Is64Bit) {
2341     // Functions with nested arguments use R10, so it needs to be saved across
2342     // the call to _morestack
2343 
2344     const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
2345     const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
2346     const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
2347     const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
2348     const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
2349 
2350     if (IsNested)
2351       BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
2352 
2353     BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
2354       .addImm(StackSize);
2355     BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
2356       .addImm(X86FI->getArgumentStackSize());
2357   } else {
2358     BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2359       .addImm(X86FI->getArgumentStackSize());
2360     BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2361       .addImm(StackSize);
2362   }
2363 
2364   // __morestack is in libgcc
2365   if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
2366     // Under the large code model, we cannot assume that __morestack lives
2367     // within 2^31 bytes of the call site, so we cannot use pc-relative
2368     // addressing. We cannot perform the call via a temporary register,
2369     // as the rax register may be used to store the static chain, and all
2370     // other suitable registers may be either callee-save or used for
2371     // parameter passing. We cannot use the stack at this point either
2372     // because __morestack manipulates the stack directly.
2373     //
2374     // To avoid these issues, perform an indirect call via a read-only memory
2375     // location containing the address.
2376     //
2377     // This solution is not perfect, as it assumes that the .rodata section
2378     // is laid out within 2^31 bytes of each function body, but this seems
2379     // to be sufficient for JIT.
2380     // FIXME: Add retpoline support and remove the error here.
2381     if (STI.useRetpoline())
2382       report_fatal_error("Emitting morestack calls on 64-bit with the large "
2383                          "code model and retpoline not yet implemented.");
2384     BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
2385         .addReg(X86::RIP)
2386         .addImm(0)
2387         .addReg(0)
2388         .addExternalSymbol("__morestack_addr")
2389         .addReg(0);
2390     MF.getMMI().setUsesMorestackAddr(true);
2391   } else {
2392     if (Is64Bit)
2393       BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
2394         .addExternalSymbol("__morestack");
2395     else
2396       BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
2397         .addExternalSymbol("__morestack");
2398   }
2399 
2400   if (IsNested)
2401     BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
2402   else
2403     BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
2404 
2405   allocMBB->addSuccessor(&PrologueMBB);
2406 
2407   checkMBB->addSuccessor(allocMBB);
2408   checkMBB->addSuccessor(&PrologueMBB);
2409 
2410 #ifdef EXPENSIVE_CHECKS
2411   MF.verify();
2412 #endif
2413 }
2414 
2415 /// Lookup an ERTS parameter in the !hipe.literals named metadata node.
2416 /// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
2417 /// to fields it needs, through a named metadata node "hipe.literals" containing
2418 /// name-value pairs.
2419 static unsigned getHiPELiteral(
2420     NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) {
2421   for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
2422     MDNode *Node = HiPELiteralsMD->getOperand(i);
2423     if (Node->getNumOperands() != 2) continue;
2424     MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
2425     ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
2426     if (!NodeName || !NodeVal) continue;
2427     ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
2428     if (ValConst && NodeName->getString() == LiteralName) {
2429       return ValConst->getZExtValue();
2430     }
2431   }
2432 
2433   report_fatal_error("HiPE literal " + LiteralName
2434                      + " required but not provided");
2435 }
2436 
2437 /// Erlang programs may need a special prologue to handle the stack size they
2438 /// might need at runtime. That is because Erlang/OTP does not implement a C
2439 /// stack but uses a custom hybrid stack/heap architecture.
2440 /// (for more information see Eric Stenman's Ph.D. thesis:
2441 /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
2442 ///
2443 /// CheckStack:
2444 ///       temp0 = sp - MaxStack
2445 ///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
2446 /// OldStart:
2447 ///       ...
2448 /// IncStack:
2449 ///       call inc_stack   # doubles the stack space
2450 ///       temp0 = sp - MaxStack
2451 ///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
2452 void X86FrameLowering::adjustForHiPEPrologue(
2453     MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
2454   MachineFrameInfo &MFI = MF.getFrameInfo();
2455   DebugLoc DL;
2456 
2457   // To support shrink-wrapping we would need to insert the new blocks
2458   // at the right place and update the branches to PrologueMBB.
2459   assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
2460 
2461   // HiPE-specific values
2462   NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule()
2463     ->getNamedMetadata("hipe.literals");
2464   if (!HiPELiteralsMD)
2465     report_fatal_error(
2466         "Can't generate HiPE prologue without runtime parameters");
2467   const unsigned HipeLeafWords
2468     = getHiPELiteral(HiPELiteralsMD,
2469                      Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
2470   const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
2471   const unsigned Guaranteed = HipeLeafWords * SlotSize;
2472   unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ?
2473                             MF.getFunction().arg_size() - CCRegisteredArgs : 0;
2474   unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize;
2475 
2476   assert(STI.isTargetLinux() &&
2477          "HiPE prologue is only supported on Linux operating systems.");
2478 
2479   // Compute the largest caller's frame that is needed to fit the callees'
2480   // frames. This 'MaxStack' is computed from:
2481   //
2482   // a) the fixed frame size, which is the space needed for all spilled temps,
2483   // b) outgoing on-stack parameter areas, and
2484   // c) the minimum stack space this function needs to make available for the
2485   //    functions it calls (a tunable ABI property).
2486   if (MFI.hasCalls()) {
2487     unsigned MoreStackForCalls = 0;
2488 
2489     for (auto &MBB : MF) {
2490       for (auto &MI : MBB) {
2491         if (!MI.isCall())
2492           continue;
2493 
2494         // Get callee operand.
2495         const MachineOperand &MO = MI.getOperand(0);
2496 
2497         // Only take account of global function calls (no closures etc.).
2498         if (!MO.isGlobal())
2499           continue;
2500 
2501         const Function *F = dyn_cast<Function>(MO.getGlobal());
2502         if (!F)
2503           continue;
2504 
2505         // Do not update 'MaxStack' for primitive and built-in functions
2506         // (encoded with names either starting with "erlang."/"bif_" or not
2507         // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
2508         // "_", such as the BIF "suspend_0") as they are executed on another
2509         // stack.
2510         if (F->getName().find("erlang.") != StringRef::npos ||
2511             F->getName().find("bif_") != StringRef::npos ||
2512             F->getName().find_first_of("._") == StringRef::npos)
2513           continue;
2514 
2515         unsigned CalleeStkArity =
2516           F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
2517         if (HipeLeafWords - 1 > CalleeStkArity)
2518           MoreStackForCalls = std::max(MoreStackForCalls,
2519                                (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
2520       }
2521     }
2522     MaxStack += MoreStackForCalls;
2523   }
2524 
2525   // If the needed stack frame exceeds the guaranteed amount, insert runtime
2526   // checks and calls to the "inc_stack_0" BIF in the assembly prologue.
2527   if (MaxStack > Guaranteed) {
2528     MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
2529     MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
2530 
2531     for (const auto &LI : PrologueMBB.liveins()) {
2532       stackCheckMBB->addLiveIn(LI);
2533       incStackMBB->addLiveIn(LI);
2534     }
2535 
2536     MF.push_front(incStackMBB);
2537     MF.push_front(stackCheckMBB);
2538 
2539     unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
2540     unsigned LEAop, CMPop, CALLop;
2541     SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
2542     if (Is64Bit) {
2543       SPReg = X86::RSP;
2544       PReg  = X86::RBP;
2545       LEAop = X86::LEA64r;
2546       CMPop = X86::CMP64rm;
2547       CALLop = X86::CALL64pcrel32;
2548     } else {
2549       SPReg = X86::ESP;
2550       PReg  = X86::EBP;
2551       LEAop = X86::LEA32r;
2552       CMPop = X86::CMP32rm;
2553       CALLop = X86::CALLpcrel32;
2554     }
2555 
2556     ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2557     assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
2558            "HiPE prologue scratch register is live-in");
2559 
2560     // Create new MBB for StackCheck:
2561     addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
2562                  SPReg, false, -MaxStack);
2563     // SPLimitOffset is in a fixed heap location (pointed by BP).
2564     addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
2565                  .addReg(ScratchReg), PReg, false, SPLimitOffset);
2566     BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&PrologueMBB);
2567 
2568     // Create new MBB for IncStack:
2569     BuildMI(incStackMBB, DL, TII.get(CALLop))
2570         .addExternalSymbol("inc_stack_0");
2571     addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
2572                  SPReg, false, -MaxStack);
2573     addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
2574                  .addReg(ScratchReg), PReg, false, SPLimitOffset);
2575     BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);
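    // If the re-check still fails, loop back into incStackMBB; otherwise fall
    // through to the function body in PrologueMBB.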
2576 
2577     stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
2578     stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
2579     incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
2580     incStackMBB->addSuccessor(incStackMBB, {1, 100});
2581   }
2582 #ifdef EXPENSIVE_CHECKS
2583   MF.verify();
2584 #endif
2585 }
2586 
2587 bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
2588                                            MachineBasicBlock::iterator MBBI,
2589                                            const DebugLoc &DL,
2590                                            int Offset) const {
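  // Try to materialize a small post-call SP adjustment as one or two POPs
  // into dead registers instead of an ADD; the preceding call's register mask
  // provides cheap liveness information for picking the scratch registers.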
2591 
2592   if (Offset <= 0)
2593     return false;
2594 
2595   if (Offset % SlotSize)
2596     return false;
2597 
2598   int NumPops = Offset / SlotSize;
2599   // This is only worth it if we have at most 2 pops.
2600   if (NumPops != 1 && NumPops != 2)
2601     return false;
2602 
2603   // Handle only the trivial case where the adjustment directly follows
2604   // a call. This is the most common one, anyway.
2605   if (MBBI == MBB.begin())
2606     return false;
2607   MachineBasicBlock::iterator Prev = std::prev(MBBI);
2608   if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
2609     return false;
2610 
2611   unsigned Regs[2];
2612   unsigned FoundRegs = 0;
2613 
2614   auto &MRI = MBB.getParent()->getRegInfo();
2615   auto RegMask = Prev->getOperand(1);
2616 
2617   auto &RegClass =
2618       Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
2619   // Try to find up to NumPops free registers.
2620   for (auto Candidate : RegClass) {
2621 
2622     // Poor man's liveness:
2623     // Since we're immediately after a call, any register that is clobbered
2624     // by the call and not defined by it can be considered dead.
2625     if (!RegMask.clobbersPhysReg(Candidate))
2626       continue;
2627 
2628     // Don't clobber reserved registers
2629     if (MRI.isReserved(Candidate))
2630       continue;
2631 
2632     bool IsDef = false;
2633     for (const MachineOperand &MO : Prev->implicit_operands()) {
2634       if (MO.isReg() && MO.isDef() &&
2635           TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
2636         IsDef = true;
2637         break;
2638       }
2639     }
2640 
2641     if (IsDef)
2642       continue;
2643 
2644     Regs[FoundRegs++] = Candidate;
2645     if (FoundRegs == (unsigned)NumPops)
2646       break;
2647   }
2648 
2649   if (FoundRegs == 0)
2650     return false;
2651 
2652   // If we found only one free register, but need two, reuse the same one twice.
2653   while (FoundRegs < (unsigned)NumPops)
2654     Regs[FoundRegs++] = Regs[0];
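       // E.g., if NumPops is 2 but only %ecx was found free, we emit
       // 'popl %ecx; popl %ecx': the first loaded value is overwritten, but
       // SP still advances by 2 * SlotSize, which is all that matters here.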
2655 
2656   for (int i = 0; i < NumPops; ++i)
2657     BuildMI(MBB, MBBI, DL,
2658             TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);
2659 
2660   return true;
2661 }
2662 
2663 MachineBasicBlock::iterator X86FrameLowering::
2664 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
2665                               MachineBasicBlock::iterator I) const {
2666   bool reserveCallFrame = hasReservedCallFrame(MF);
2667   unsigned Opcode = I->getOpcode();
2668   bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
2669   DebugLoc DL = I->getDebugLoc();
2670   uint64_t Amount = !reserveCallFrame ? TII.getFrameSize(*I) : 0;
2671   uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0;
2672   I = MBB.erase(I);
2673   auto InsertPos = skipDebugInstructionsForward(I, MBB.end());
2674 
2675   if (!reserveCallFrame) {
2676     // If the stack pointer can be changed after prologue, turn the
2677     // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
2678     // adjcallstackup instruction into an 'add ESP, <amt>'.
2679 
2680     // We need to keep the stack aligned properly.  To do this, we round the
2681     // amount of space needed for the outgoing arguments up to the next
2682     // alignment boundary.
2683     unsigned StackAlign = getStackAlignment();
2684     Amount = alignTo(Amount, StackAlign);
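         // E.g., 20 bytes of outgoing arguments with a 16-byte stack
         // alignment rounds Amount up to 32.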
2685 
2686     MachineModuleInfo &MMI = MF.getMMI();
2687     const Function &F = MF.getFunction();
2688     bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
2689     bool DwarfCFI = !WindowsCFI &&
2690                     (MMI.hasDebugInfo() || F.needsUnwindTableEntry());
2691 
2692     // If we have any exception handlers in this function, and we adjust
2693     // the SP before calls, we may need to indicate this to the unwinder
2694     // using GNU_ARGS_SIZE. Note that this may be necessary even when
2695     // Amount == 0, because a preceding call sequence may have set a
2696     // non-zero GNU_ARGS_SIZE.
2697     // TODO: We don't need to reset this between subsequent call sequences
2698     // if it didn't change.
2699     bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();
2700 
2701     if (HasDwarfEHHandlers && !isDestroy &&
2702         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
2703       BuildCFI(MBB, InsertPos, DL,
2704                MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
2705 
2706     if (Amount == 0)
2707       return I;
2708 
2709     // Factor out the amount that gets handled inside the sequence
2710     // (pushes of arguments for frame setup, callee pops for frame destroy).
2711     Amount -= InternalAmt;
2712 
2713     // TODO: This is needed only if we require precise CFA.
2714     // If this is a callee-pop calling convention, emit a CFA adjust for
2715     // the amount the callee popped.
2716     if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
2717       BuildCFI(MBB, InsertPos, DL,
2718                MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
2719 
2720     // Add Amount to SP to destroy a frame, or subtract to setup.
2721     int64_t StackAdjustment = isDestroy ? Amount : -Amount;
2722     int64_t CfaAdjustment = -StackAdjustment;
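         // E.g., for a frame destroy with Amount == 32 where the callee
         // already popped InternalAmt == 8: Amount is now 24, so
         // StackAdjustment is +24 (an add to SP) and CfaAdjustment is -24.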
2723 
2724     if (StackAdjustment) {
2725       // Merge with any previous or following adjustment instruction. Note:
2726       // the instructions merged here carry no CFI of their own, so their
2727       // stack adjustments do not feed into CfaAdjustment.
2728       StackAdjustment += mergeSPUpdates(MBB, InsertPos, true);
2729       StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
2730 
2731       if (StackAdjustment) {
2732         if (!(F.optForMinSize() &&
2733               adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
2734           BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
2735                                /*InEpilogue=*/false);
2736       }
2737     }
2738 
2739     if (DwarfCFI && !hasFP(MF)) {
2740       // If we don't have FP, but need to generate unwind information,
2741       // we need to set the correct CFA offset after the stack adjustment.
2742       // How much we adjust the CFA offset depends on whether we're emitting
2743       // CFI only for EH purposes or for debugging. EH only requires the CFA
2744       // offset to be correct at each call site, while for debugging we want
2745       // it to be more precise.
2746 
2747       // TODO: When not using precise CFA, we also need to adjust for the
2748       // InternalAmt here.
2749       if (CfaAdjustment) {
2750         BuildCFI(MBB, InsertPos, DL,
2751                  MCCFIInstruction::createAdjustCfaOffset(nullptr,
2752                                                          CfaAdjustment));
2753       }
2754     }
2755 
2756     return I;
2757   }
2758 
2759   if (isDestroy && InternalAmt) {
2760     // If we are performing frame pointer elimination and if the callee pops
2761     // something off the stack pointer, add it back.  We do this until we have
2762     // more advanced stack pointer tracking ability.
2763     // We are not tracking the stack pointer adjustment by the callee, so make
2764     // sure we restore the stack pointer immediately after the call; there may
2765     // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
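         // E.g., after a call to a 32-bit stdcall callee that pops 8 bytes of
         // arguments, this re-reserves those 8 bytes (typically as
         // 'subl $8, %esp') immediately after the CALL.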
2766     MachineBasicBlock::iterator CI = I;
2767     MachineBasicBlock::iterator B = MBB.begin();
2768     while (CI != B && !std::prev(CI)->isCall())
2769       --CI;
2770     BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
2771   }
2772 
2773   return I;
2774 }
2775 
2776 bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
2777   assert(MBB.getParent() && "Block is not attached to a function!");
2778   const MachineFunction &MF = *MBB.getParent();
2779   return !TRI->needsStackRealignment(MF) || !MBB.isLiveIn(X86::EFLAGS);
2780 }
2781 
2782 bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
2783   assert(MBB.getParent() && "Block is not attached to a function!");
2784 
2785   // Win64 has strict requirements on the epilogue, and we are not
2786   // taking any chances messing with them.
2787   // I.e., unless this block is already an exit block, we can't use
2788   // it as an epilogue.
2789   if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())
2790     return false;
2791 
2792   if (canUseLEAForSPInEpilogue(*MBB.getParent()))
2793     return true;
2794 
2795   // If we cannot use LEA to adjust SP, we may need to use ADD, which
2796   // clobbers EFLAGS. Check that we do not need to preserve it;
2797   // otherwise, conservatively assume it is not safe to insert the
2798   // epilogue here.
2799   return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
2800 }
2801 
2802 bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
2803   // If we may need to emit frameless compact unwind information, give
2804   // up as this is currently broken: PR25614.
2805   return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
2806          // The lowering of segmented stack and HiPE only supports entry blocks
2807          // as prologue blocks: PR26107.
2808          // This limitation may be lifted if we fix:
2809          // - adjustForSegmentedStacks
2810          // - adjustForHiPEPrologue
2811          MF.getFunction().getCallingConv() != CallingConv::HiPE &&
2812          !MF.shouldSplitStack();
2813 }
2814 
2815 MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
2816     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
2817     const DebugLoc &DL, bool RestoreSP) const {
2818   assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
2819   assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
2820   assert(STI.is32Bit() && !Uses64BitFramePtr &&
2821          "restoring EBP/ESI on non-32-bit target");
2822 
2823   MachineFunction &MF = *MBB.getParent();
2824   unsigned FramePtr = TRI->getFrameRegister(MF);
2825   unsigned BasePtr = TRI->getBaseRegister();
2826   WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
2827   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2828   MachineFrameInfo &MFI = MF.getFrameInfo();
2829 
2830   // FIXME: Don't set FrameSetup flag in catchret case.
2831 
2832   int FI = FuncInfo.EHRegNodeFrameIndex;
2833   int EHRegSize = MFI.getObjectSize(FI);
2834 
2835   if (RestoreSP) {
2836     // MOV32rm -EHRegSize(%ebp), %esp
2837     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
2838                  X86::EBP, true, -EHRegSize)
2839         .setMIFlag(MachineInstr::FrameSetup);
2840   }
2841 
2842   unsigned UsedReg;
2843   int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg);
2844   int EndOffset = -EHRegOffset - EHRegSize;
2845   FuncInfo.EHRegNodeEndOffset = EndOffset;
2846 
2847   if (UsedReg == FramePtr) {
2848     // ADD $offset, %ebp
2849     unsigned ADDri = getADDriOpcode(false, EndOffset);
2850     BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
2851         .addReg(FramePtr)
2852         .addImm(EndOffset)
2853         .setMIFlag(MachineInstr::FrameSetup)
2854         ->getOperand(3)
2855         .setIsDead();
2856     assert(EndOffset >= 0 &&
2857            "end of registration object above normal EBP position!");
2858   } else if (UsedReg == BasePtr) {
2859     // LEA offset(%ebp), %esi
2860     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
2861                  FramePtr, false, EndOffset)
2862         .setMIFlag(MachineInstr::FrameSetup);
2863     // MOV32rm SavedEBPOffset(%esi), %ebp
2864     assert(X86FI->getHasSEHFramePtrSave());
2865     int Offset =
2866         getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
2867     assert(UsedReg == BasePtr);
2868     addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
2869                  UsedReg, true, Offset)
2870         .setMIFlag(MachineInstr::FrameSetup);
2871   } else {
2872     llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
2873   }
2874   return MBBI;
2875 }
2876 
2877 namespace {
2878 // Struct used by orderFrameObjects to help sort the stack objects.
2879 struct X86FrameSortingObject {
2880   bool IsValid = false;         // true if we care about this Object.
2881   unsigned ObjectIndex = 0;     // Index of Object into MFI list.
2882   unsigned ObjectSize = 0;      // Size of Object in bytes.
2883   unsigned ObjectAlignment = 1; // Alignment of Object in bytes.
2884   unsigned ObjectNumUses = 0;   // Object static number of uses.
2885 };
2886 
2887 // The comparison function we use for std::stable_sort to order our local
2888 // stack symbols. The current algorithm is to use an estimated
2889 // "density". This takes into consideration the size and number of
2890 // uses each object has in order to roughly minimize code size.
2891 // So, for example, an object of size 16B that is referenced 5 times
2892 // will get higher priority than 4 4B objects referenced 1 time each.
2893 // It's not perfect and we may be able to squeeze a few more bytes out of
2894 // it (for example: 0(esp) requires fewer bytes, symbols allocated at the
2895 // fringe end can get special consideration, given their size matters less,
2896 // etc.), but the algorithmic complexity grows too much to be worth the
2897 // extra gains we get. This gets us pretty close.
2898 // The final order leaves us with objects with highest priority going
2899 // at the end of our list.
2900 struct X86FrameSortingComparator {
2901   inline bool operator()(const X86FrameSortingObject &A,
2902                          const X86FrameSortingObject &B) {
2903     uint64_t DensityAScaled, DensityBScaled;
2904 
2905     // For consistency in our comparison, all invalid objects are placed
2906     // at the end. This also allows us to stop walking when we hit the
2907     // first invalid item after it's all sorted.
2908     if (!A.IsValid)
2909       return false;
2910     if (!B.IsValid)
2911       return true;
2912 
2913     // The density is calculated by doing:
2914     //     (double)DensityA = A.ObjectNumUses / A.ObjectSize
2915     //     (double)DensityB = B.ObjectNumUses / B.ObjectSize
2916     // Since this approach may cause inconsistencies in
2917     // the floating point <, >, == comparisons, depending on the floating
2918     // point model with which the compiler was built, we're going
2919     // to scale both sides by multiplying with
2920     // A.ObjectSize * B.ObjectSize. This ends up factoring away
2921     // the division and, with it, the need for any floating point
2922     // arithmetic.
2923     DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
2924       static_cast<uint64_t>(B.ObjectSize);
2925     DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
2926       static_cast<uint64_t>(A.ObjectSize);
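         // Worked example: A = {5 uses, 16 bytes} vs. B = {1 use, 4 bytes}
         // gives DensityAScaled = 5 * 4 = 20 and DensityBScaled = 1 * 16 = 16,
         // so B sorts before A and the denser A ends up later, i.e. with
         // higher priority.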
2927 
2928     // If the two densities are equal, prioritize the object with the
2929     // higher alignment. This lets similarly aligned objects be packed
2930     // together (given the same density).
2931     // There's room for improvement here, too, since packing similarly
2932     // aligned (but different density) objects next to each other would
2933     // save padding. That would require further complexity/iterations,
2934     // and the overall gain generally isn't worth it.
2935     // Something to keep in mind, though.
2936     if (DensityAScaled == DensityBScaled)
2937       return A.ObjectAlignment < B.ObjectAlignment;
2938 
2939     return DensityAScaled < DensityBScaled;
2940   }
2941 };
2942 } // namespace
2943 
2944 // Order the symbols in the local stack.
2945 // We want to place the local stack objects in some sort of sensible order.
2946 // The heuristic we use is to try and pack them according to static number
2947 // of uses and size of object in order to minimize code size.
2948 void X86FrameLowering::orderFrameObjects(
2949     const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
2950   const MachineFrameInfo &MFI = MF.getFrameInfo();
2951 
2952   // Don't waste time if there's nothing to do.
2953   if (ObjectsToAllocate.empty())
2954     return;
2955 
2956   // Create an array of all MFI objects. We won't need all of these
2957   // objects, but we're going to create a full array of them to make
2958   // it easier to index into when we're counting "uses" down below.
2959   // We want to be able to easily/cheaply access an object by simply
2960   // indexing into it, instead of having to search for it every time.
2961   std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());
2962 
2963   // Walk the objects we care about and mark them as such in our working
2964   // struct.
2965   for (auto &Obj : ObjectsToAllocate) {
2966     SortingObjects[Obj].IsValid = true;
2967     SortingObjects[Obj].ObjectIndex = Obj;
2968     SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlignment(Obj);
2969     // Set the size.
2970     int ObjectSize = MFI.getObjectSize(Obj);
2971     if (ObjectSize == 0)
2972       // Variable size. Just use 4.
2973       SortingObjects[Obj].ObjectSize = 4;
2974     else
2975       SortingObjects[Obj].ObjectSize = ObjectSize;
2976   }
2977 
2978   // Count the number of uses for each object.
2979   for (auto &MBB : MF) {
2980     for (auto &MI : MBB) {
2981       if (MI.isDebugValue())
2982         continue;
2983       for (const MachineOperand &MO : MI.operands()) {
2984         // Check to see if it's a local stack symbol.
2985         if (!MO.isFI())
2986           continue;
2987         int Index = MO.getIndex();
2988         // Check to see if it falls within our range, and is tagged
2989         // to require ordering.
2990         if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
2991             SortingObjects[Index].IsValid)
2992           SortingObjects[Index].ObjectNumUses++;
2993       }
2994     }
2995   }
2996 
2997   // Sort the objects using X86FrameSortingComparator (see its comment
2998   // for more info).
2999   std::stable_sort(SortingObjects.begin(), SortingObjects.end(),
3000                    X86FrameSortingComparator());
3001 
3002   // Now modify the original list to represent the final order that
3003   // we want. The order depends on whether we're going to access the
3004   // objects through the stack pointer or the frame pointer. For SP, the
3005   // objects we want at smaller offsets should end up at the END of the
3006   // list. For FP, the order is flipped.
3007   int i = 0;
3008   for (auto &Obj : SortingObjects) {
3009     // All invalid items are sorted at the end, so it's safe to stop.
3010     if (!Obj.IsValid)
3011       break;
3012     ObjectsToAllocate[i++] = Obj.ObjectIndex;
3013   }
3014 
3015   // Flip it if we're accessing off of the FP.
3016   if (!TRI->needsStackRealignment(MF) && hasFP(MF))
3017     std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
3018 }
3019 
3020 unsigned
3021 X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
3022   // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
3023   unsigned Offset = 16;
3024   // RBP is immediately pushed.
3025   Offset += SlotSize;
3026   // All callee-saved registers are then pushed.
3027   Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
3028   // Every funclet allocates enough stack space for the largest outgoing call.
3029   Offset += getWinEHFuncletFrameSize(MF);
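       // E.g. (hypothetical numbers): with SlotSize == 8, 16 bytes of
       // callee-saved pushes, and a 32-byte funclet frame, the parent frame
       // pointer lives 16 + 8 + 16 + 32 = 72 bytes above the funclet's RSP.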
3030   return Offset;
3031 }
3032 
3033 void X86FrameLowering::processFunctionBeforeFrameFinalized(
3034     MachineFunction &MF, RegScavenger *RS) const {
3035   // Mark the function as not having WinCFI. We will set it back to true in
3036   // emitPrologue if it gets called and emits CFI.
3037   MF.setHasWinCFI(false);
3038 
3039   // If this function isn't doing Win64-style C++ EH, we don't need to do
3040   // anything.
3041   const Function &F = MF.getFunction();
3042   if (!STI.is64Bit() || !MF.hasEHFunclets() ||
3043       classifyEHPersonality(F.getPersonalityFn()) != EHPersonality::MSVC_CXX)
3044     return;
3045 
3046   // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
3047   // relative to RSP after the prologue.  Find the offset of the last fixed
3048   // object, so that we can allocate a slot immediately following it. If there
3049   // were no fixed objects, use offset -SlotSize, which is immediately after the
3050   // return address. Fixed objects have negative frame indices.
3051   MachineFrameInfo &MFI = MF.getFrameInfo();
3052   WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
3053   int64_t MinFixedObjOffset = -SlotSize;
3054   for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
3055     MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));
3056 
3057   for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
3058     for (WinEHHandlerType &H : TBME.HandlerArray) {
3059       int FrameIndex = H.CatchObj.FrameIndex;
3060       if (FrameIndex != INT_MAX) {
3061         // Ensure alignment.
3062         unsigned Align = MFI.getObjectAlignment(FrameIndex);
3063         MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
3064         MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
3065         MFI.setObjectOffset(FrameIndex, MinFixedObjOffset);
3066       }
3067     }
3068   }
3069 
3070   // Ensure alignment.
3071   MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
3072   int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
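       // E.g., if MinFixedObjOffset is -20 at this point, the line above
       // rounds it down to -24 and UnwindHelp is then allocated one slot
       // below that, at offset -32.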
3073   int UnwindHelpFI =
3074       MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*Immutable=*/false);
3075   EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
3076 
3077   // Store -2 into UnwindHelp on function entry. We have to scan forwards past
3078   // other frame setup instructions.
3079   MachineBasicBlock &MBB = MF.front();
3080   auto MBBI = MBB.begin();
3081   while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
3082     ++MBBI;
3083 
3084   DebugLoc DL = MBB.findDebugLoc(MBBI);
3085   addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),
3086                     UnwindHelpFI)
3087       .addImm(-2);
3088 }
3089