//===----- X86CallFrameOptimization.cpp - Optimize x86 call sequences -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a pass that optimizes call sequences on x86.
// Currently, it converts movs of function parameters onto the stack into
// pushes. This is beneficial for two main reasons:
// 1) The push instruction encoding is much smaller than a stack-ptr-based mov.
// 2) It is possible to push memory arguments directly. So, if the
//    transformation is performed pre-reg-alloc, it can help relieve
//    register pressure.
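//
// For illustration, a 32-bit call sequence conceptually like
//   movl  $3, 8(%esp)
//   movl  %eax, 4(%esp)
//   movl  $0, (%esp)
//   calll foo
// can instead allocate its argument area with pushes, emitted in reverse
// order (highest displacement first):
//   pushl $3
//   pushl %eax
//   pushl $0
//   calll foo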
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86FrameLowering.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "x86-cf-opt"

static cl::opt<bool>
    NoX86CFOpt("no-x86-call-frame-opt",
               cl::desc("Avoid optimizing x86 call frames for size"),
               cl::init(false), cl::Hidden);

namespace {

class X86CallFrameOptimization : public MachineFunctionPass {
public:
  X86CallFrameOptimization() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;

private:
  // Information we know about a particular call site
  struct CallContext {
    CallContext() : FrameSetup(nullptr), ArgStoreVector(4, nullptr) {}

    // Iterator referring to the frame setup instruction
    MachineBasicBlock::iterator FrameSetup;

    // Actual call instruction
    MachineInstr *Call = nullptr;

    // A copy of the stack pointer
    MachineInstr *SPCopy = nullptr;

    // The total displacement of all passed parameters
    int64_t ExpectedDist = 0;

    // The sequence of storing instructions used to pass the parameters
    SmallVector<MachineInstr *, 4> ArgStoreVector;

    // True if this call site has no stack parameters
    bool NoStackParams = false;

    // True if this call site can use push instructions
    bool UsePush = false;
  };

  typedef SmallVector<CallContext, 8> ContextVector;

  bool isLegal(MachineFunction &MF);

  bool isProfitable(MachineFunction &MF, ContextVector &CallSeqMap);

  void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator I, CallContext &Context);

  void adjustCallSequence(MachineFunction &MF, const CallContext &Context);

  MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
                                   Register Reg);

  enum InstClassification { Convert, Skip, Exit };

  InstClassification classifyInstruction(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MI,
                                         const X86RegisterInfo &RegInfo,
                                         DenseSet<unsigned int> &UsedRegs);

  StringRef getPassName() const override { return "X86 Optimize Call Frame"; }

  const X86InstrInfo *TII = nullptr;
  const X86FrameLowering *TFL = nullptr;
  const X86Subtarget *STI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  unsigned SlotSize = 0;
  unsigned Log2SlotSize = 0;
};

} // end anonymous namespace

char X86CallFrameOptimization::ID = 0;
INITIALIZE_PASS(X86CallFrameOptimization, DEBUG_TYPE,
                "X86 Call Frame Optimization", false, false)

// This checks whether the transformation is legal.
// Also returns false in cases where it's potentially legal, but
// we don't even want to try.
bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
  if (NoX86CFOpt.getValue())
    return false;

  // We can't encode multiple DW_CFA_GNU_args_size or DW_CFA_def_cfa_offset
  // in the compact unwind encoding that Darwin uses. So, bail if there
  // is a danger of that being generated.
  if (STI->isTargetDarwin() &&
      (!MF.getLandingPads().empty() ||
       (MF.getFunction().needsUnwindTableEntry() && !TFL->hasFP(MF))))
    return false;

  // It is not valid to change the stack pointer outside the prolog/epilog
  // on 64-bit Windows.
  if (STI->isTargetWin64())
    return false;

  // You would expect straight-line code between call-frame setup and
  // call-frame destroy. You would be wrong. There are circumstances (e.g.
  // CMOV_GR8 expansion of a select that feeds a function call!) where we can
  // end up with the setup and the destroy in different basic blocks.
  // This is bad, and breaks SP adjustment.
  // So, check that all of the frames in the function are closed inside
  // the same block, and, for good measure, that there are no nested frames.
  //
  // If any call allocates more argument stack memory than the stack
  // probe size, don't do this optimization. Otherwise, this pass
  // would need to synthesize additional stack probe calls to allocate
  // memory for arguments.
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  bool EmitStackProbeCall = STI->getTargetLowering()->hasStackProbeSymbol(MF);
  unsigned StackProbeSize = STI->getTargetLowering()->getStackProbeSize(MF);
  for (MachineBasicBlock &BB : MF) {
    bool InsideFrameSequence = false;
    for (MachineInstr &MI : BB) {
      if (MI.getOpcode() == FrameSetupOpcode) {
        if (TII->getFrameSize(MI) >= StackProbeSize && EmitStackProbeCall)
          return false;
        if (InsideFrameSequence)
          return false;
        InsideFrameSequence = true;
      } else if (MI.getOpcode() == FrameDestroyOpcode) {
        if (!InsideFrameSequence)
          return false;
        InsideFrameSequence = false;
      }
    }

    if (InsideFrameSequence)
      return false;
  }

  return true;
}

// Check whether this transformation is profitable for a particular
// function - in terms of code size.
bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
                                            ContextVector &CallSeqVector) {
  // This transformation is always a win when we do not expect to have
  // a reserved call frame. Under other circumstances, it may be either
  // a win or a loss, and requires a heuristic.
  bool CannotReserveFrame = MF.getFrameInfo().hasVarSizedObjects();
  if (CannotReserveFrame)
    return true;

  Align StackAlign = TFL->getStackAlign();

  int64_t Advantage = 0;
  for (const auto &CC : CallSeqVector) {
    // Call sites where no parameters are passed on the stack
    // do not affect the cost, since there needs to be no
    // stack adjustment.
    if (CC.NoStackParams)
      continue;

    if (!CC.UsePush) {
      // If we don't use pushes for a particular call site,
      // we pay for not having a reserved call frame with an
      // additional sub/add esp pair. The cost is ~3 bytes per instruction,
      // depending on the size of the constant.
      // TODO: Callee-pop functions should have a smaller penalty, because
      // an add is needed even with a reserved call frame.
      Advantage -= 6;
    } else {
      // We can use pushes. First, account for the fixed costs.
      // We'll need an add after the call.
      Advantage -= 3;
      // If we have to realign the stack, we'll also need a sub before
      // the call.
      if (!isAligned(StackAlign, CC.ExpectedDist))
        Advantage -= 3;
      // Now, for each push, we save ~3 bytes. For small constants, we
      // actually save more (up to 5 bytes), but 3 should be a good
      // approximation.
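      // For example, with a 4-byte slot size, a call site whose four stack
      // arguments keep the stack aligned contributes -3 + 4 * 3 = +9 to the
      // total, while an unaligned site with a single stack argument
      // contributes -3 - 3 + 3 = -3.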
      Advantage += (CC.ExpectedDist >> Log2SlotSize) * 3;
    }
  }

  return Advantage >= 0;
}

bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget<X86Subtarget>();
  TII = STI->getInstrInfo();
  TFL = STI->getFrameLowering();
  MRI = &MF.getRegInfo();

  const X86RegisterInfo &RegInfo =
      *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());
  SlotSize = RegInfo.getSlotSize();
  assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size");
  Log2SlotSize = Log2_32(SlotSize);

  if (skipFunction(MF.getFunction()) || !isLegal(MF))
    return false;

  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();

  bool Changed = false;

  ContextVector CallSeqVector;

  for (auto &MBB : MF)
    for (auto &MI : MBB)
      if (MI.getOpcode() == FrameSetupOpcode) {
        CallContext Context;
        collectCallInfo(MF, MBB, MI, Context);
        CallSeqVector.push_back(Context);
      }

  if (!isProfitable(MF, CallSeqVector))
    return false;

  for (const auto &CC : CallSeqVector) {
    if (CC.UsePush) {
      adjustCallSequence(MF, CC);
      Changed = true;
    }
  }

  return Changed;
}

X86CallFrameOptimization::InstClassification
X86CallFrameOptimization::classifyInstruction(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const X86RegisterInfo &RegInfo, DenseSet<unsigned int> &UsedRegs) {
  if (MI == MBB.end())
    return Exit;

  // The instructions we actually care about are movs onto the stack or special
  // cases of constant-stores to stack.
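  // (The AND-with-0 and OR-with-minus-1 opcodes below are forms the backend
  // may use to store the constants 0 and -1 with a shorter encoding; for our
  // purposes they behave exactly like plain constant stores.)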
  switch (MI->getOpcode()) {
    case X86::AND16mi8:
    case X86::AND32mi8:
    case X86::AND64mi8: {
      const MachineOperand &ImmOp = MI->getOperand(X86::AddrNumOperands);
      return ImmOp.getImm() == 0 ? Convert : Exit;
    }
    case X86::OR16mi8:
    case X86::OR32mi8:
    case X86::OR64mi8: {
      const MachineOperand &ImmOp = MI->getOperand(X86::AddrNumOperands);
      return ImmOp.getImm() == -1 ? Convert : Exit;
    }
    case X86::MOV32mi:
    case X86::MOV32mr:
    case X86::MOV64mi32:
    case X86::MOV64mr:
      return Convert;
  }

  // Not all calling conventions have only stack MOVs between the stack
  // adjust and the call.

  // We want to tolerate other instructions, to cover more cases.
  // In particular:
  // a) PCrel calls, where we expect an additional COPY of the basereg.
  // b) Passing frame-index addresses.
  // c) Calling conventions that have inreg parameters. These generate
  //    both copies and movs into registers.
  // To avoid creating lots of special cases, allow any instruction
  // that does not write into memory, does not def or use the stack
  // pointer, and does not def any register that was used by a preceding
  // push.
  // (Reading from memory is allowed, even if referenced through a
  // frame index, since these will get adjusted properly in PEI)

  // The reason for the last condition is that the pushes can't replace
  // the movs in place, because the order must be reversed.
  // So if we have a MOV32mr that uses EDX, then an instruction that defs
  // EDX, and then the call, after the transformation the push will use
  // the modified version of EDX, and not the original one.
  // Since we are still in SSA form at this point, we only need to
  // make sure we don't clobber any *physical* registers that were
  // used by an earlier mov that will become a push.

  if (MI->isCall() || MI->mayStore())
    return Exit;

  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg.isPhysical())
      continue;
    if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
      return Exit;
    if (MO.isDef()) {
      for (unsigned int U : UsedRegs)
        if (RegInfo.regsOverlap(Reg, U))
          return Exit;
    }
  }

  return Skip;
}

void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
                                               MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator I,
                                               CallContext &Context) {
  // Check that this particular call sequence is amenable to the
  // transformation.
  const X86RegisterInfo &RegInfo =
      *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());

  // We expect to enter this at the beginning of a call sequence
  assert(I->getOpcode() == TII->getCallFrameSetupOpcode());
  MachineBasicBlock::iterator FrameSetup = I++;
  Context.FrameSetup = FrameSetup;

  // How much do we adjust the stack? This puts an upper bound on
  // the number of parameters actually passed on it.
  unsigned int MaxAdjust = TII->getFrameSize(*FrameSetup) >> Log2SlotSize;

  // A zero adjustment means no stack parameters
  if (!MaxAdjust) {
    Context.NoStackParams = true;
    return;
  }

  // Skip over DEBUG_VALUE.
  // For globals in PIC mode, we can have some LEAs here. Skip them as well.
  // TODO: Extend this to something that covers more cases.
  while (I->getOpcode() == X86::LEA32r || I->isDebugInstr())
    ++I;

  Register StackPtr = RegInfo.getStackRegister();
  auto StackPtrCopyInst = MBB.end();
  // SelectionDAG (but not FastISel) inserts a copy of ESP into a virtual
  // register. If it's there, use that virtual register as stack pointer
  // instead. Also, we need to locate this instruction so that we can later
  // safely ignore it while doing the conservative processing of the call
  // chain. The COPY can be located anywhere between the call-frame setup
  // instruction and its first use. We use the call instruction as a boundary
  // because it is usually cheaper to check if an instruction is a call than
  // checking if an instruction uses a register.
  for (auto J = I; !J->isCall(); ++J)
    if (J->isCopy() && J->getOperand(0).isReg() && J->getOperand(1).isReg() &&
        J->getOperand(1).getReg() == StackPtr) {
      StackPtrCopyInst = J;
      Context.SPCopy = &*J++;
      StackPtr = Context.SPCopy->getOperand(0).getReg();
      break;
    }

  // Scan the call setup sequence for the pattern we're looking for.
  // We only handle a simple case - a sequence of store instructions that
  // push a sequence of stack-slot-aligned values onto the stack, with
  // no gaps between them.
  if (MaxAdjust > 4)
    Context.ArgStoreVector.resize(MaxAdjust, nullptr);

  DenseSet<unsigned int> UsedRegs;

  for (InstClassification Classification = Skip; Classification != Exit; ++I) {
    // If this is the COPY of the stack pointer, it's ok to ignore.
    if (I == StackPtrCopyInst)
      continue;
    Classification = classifyInstruction(MBB, I, RegInfo, UsedRegs);
    if (Classification != Convert)
      continue;
    // We know the instruction has a supported store opcode.
    // We only want movs of the form:
    // mov imm/reg, k(%StackPtr)
    // If we run into something else, bail.
    // Note that AddrBaseReg may, counter to its name, not be a register,
    // but rather a frame index.
    // TODO: Support the fi case. This should probably work now that we
    // have the infrastructure to track the stack pointer within a call
    // sequence.
    if (!I->getOperand(X86::AddrBaseReg).isReg() ||
        (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||
        !I->getOperand(X86::AddrScaleAmt).isImm() ||
        (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||
        (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||
        (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||
        !I->getOperand(X86::AddrDisp).isImm())
      return;

    int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();
    assert(StackDisp >= 0 &&
           "Negative stack displacement when passing parameters");

    // We really don't want to consider the unaligned case.
    if (StackDisp & (SlotSize - 1))
      return;
    StackDisp >>= Log2SlotSize;

    assert((size_t)StackDisp < Context.ArgStoreVector.size() &&
           "Function call has more parameters than the stack is adjusted for.");

    // If the same stack slot is being filled twice, something's fishy.
    if (Context.ArgStoreVector[StackDisp] != nullptr)
      return;
    Context.ArgStoreVector[StackDisp] = &*I;

    for (const MachineOperand &MO : I->uses()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (Reg.isPhysical())
        UsedRegs.insert(Reg);
    }
  }

  --I;

  // We now expect the end of the sequence. If we stopped early,
  // or reached the end of the block without finding a call, bail.
  if (I == MBB.end() || !I->isCall())
    return;

  Context.Call = &*I;
  if ((++I)->getOpcode() != TII->getCallFrameDestroyOpcode())
    return;

  // Now, go through the vector, and see that we don't have any gaps,
  // but only a series of storing instructions.
  auto MMI = Context.ArgStoreVector.begin(), MME = Context.ArgStoreVector.end();
  for (; MMI != MME; ++MMI, Context.ExpectedDist += SlotSize)
    if (*MMI == nullptr)
      break;

  // If the call had no parameters, do nothing
  if (MMI == Context.ArgStoreVector.begin())
    return;

  // We are either at the last parameter, or a gap.
  // Make sure it's not a gap
  for (; MMI != MME; ++MMI)
    if (*MMI != nullptr)
      return;

  Context.UsePush = true;
}

void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
                                                  const CallContext &Context) {
  // Ok, we can in fact do the transformation for this call.
  // Do not remove the FrameSetup instruction, but adjust the parameters.
  // PEI will end up finalizing the handling of this.
  MachineBasicBlock::iterator FrameSetup = Context.FrameSetup;
  MachineBasicBlock &MBB = *(FrameSetup->getParent());
  TII->setFrameAdjustment(*FrameSetup, Context.ExpectedDist);

  const DebugLoc &DL = FrameSetup->getDebugLoc();
  bool Is64Bit = STI->is64Bit();
  // Now, iterate through the vector in reverse order, and replace the stores
  // to the stack with pushes. MOVmi/MOVmr doesn't have any defs, so no need
  // to replace uses.
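  // (Reverse order because pushes grow the stack downwards: the argument
  // stored at the highest displacement must be pushed first so that it ends
  // up farthest from the final stack pointer.)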
  for (int Idx = (Context.ExpectedDist >> Log2SlotSize) - 1; Idx >= 0; --Idx) {
    MachineBasicBlock::iterator Store = *Context.ArgStoreVector[Idx];
    const MachineOperand &PushOp = Store->getOperand(X86::AddrNumOperands);
    MachineBasicBlock::iterator Push = nullptr;
    unsigned PushOpcode;
    switch (Store->getOpcode()) {
    default:
      llvm_unreachable("Unexpected Opcode!");
    case X86::AND16mi8:
    case X86::AND32mi8:
    case X86::AND64mi8:
    case X86::OR16mi8:
    case X86::OR32mi8:
    case X86::OR64mi8:
    case X86::MOV32mi:
    case X86::MOV64mi32:
      PushOpcode = Is64Bit ? X86::PUSH64i32 : X86::PUSHi32;
      // If the operand is a small (8-bit) immediate, we can use a
      // PUSH instruction with a shorter encoding.
      // Note that isImm() may fail even though this is a MOVmi, because
      // the operand can also be a symbol.
      if (PushOp.isImm()) {
        int64_t Val = PushOp.getImm();
        if (isInt<8>(Val))
          PushOpcode = Is64Bit ? X86::PUSH64i8 : X86::PUSH32i8;
      }
      Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode)).add(PushOp);
      Push->cloneMemRefs(MF, *Store);
      break;
    case X86::MOV32mr:
    case X86::MOV64mr: {
      Register Reg = PushOp.getReg();

      // If storing a 32-bit vreg on 64-bit targets, extend to a 64-bit vreg
      // in preparation for the PUSH64. The upper 32 bits can be undef.
      if (Is64Bit && Store->getOpcode() == X86::MOV32mr) {
        Register UndefReg = MRI->createVirtualRegister(&X86::GR64RegClass);
        Reg = MRI->createVirtualRegister(&X86::GR64RegClass);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg)
            .addReg(UndefReg)
            .add(PushOp)
            .addImm(X86::sub_32bit);
      }

      // If PUSHrmm is not slow on this target, try to fold the source of the
      // push into the instruction.
      bool SlowPUSHrmm = STI->slowTwoMemOps();

      // Check that this is legal to fold. Right now, we're extremely
      // conservative about that.
      MachineInstr *DefMov = nullptr;
      if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {
        PushOpcode = Is64Bit ? X86::PUSH64rmm : X86::PUSH32rmm;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode));

        unsigned NumOps = DefMov->getDesc().getNumOperands();
        for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
          Push->addOperand(DefMov->getOperand(i));
        Push->cloneMergedMemRefs(MF, {DefMov, &*Store});
        DefMov->eraseFromParent();
      } else {
        PushOpcode = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
                   .addReg(Reg)
                   .getInstr();
        Push->cloneMemRefs(MF, *Store);
      }
      break;
    }
    }

    // For debugging, when using SP-based CFA, we need to adjust the CFA
    // offset after each push.
    // TODO: This is needed only if we require precise CFA.
    if (!TFL->hasFP(MF))
      TFL->BuildCFI(
          MBB, std::next(Push), DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, SlotSize));

    MBB.erase(Store);
  }

  // The stack-pointer copy is no longer used in the call sequences.
  // There should not be any other users, but we can't commit to that, so:
  if (Context.SPCopy && MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
    Context.SPCopy->eraseFromParent();

  // Once we've done this, we need to make sure PEI doesn't assume a reserved
  // frame.
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setHasPushSequences(true);
}

MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
    MachineBasicBlock::iterator FrameSetup, Register Reg) {
  // Do an extremely restricted form of load folding.
  // ISel will often create patterns like:
  // movl    4(%edi), %eax
  // movl    8(%edi), %ecx
  // movl    12(%edi), %edx
  // movl    %edx, 8(%esp)
  // movl    %ecx, 4(%esp)
  // movl    %eax, (%esp)
  // call
  // Get rid of those with prejudice.
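  // When folding is legal, the load/store pairs above can instead become a
  // sequence of memory-operand pushes (highest stack offset first), roughly:
  // pushl   12(%edi)
  // pushl   8(%edi)
  // pushl   4(%edi)
  // call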
  if (!Reg.isVirtual())
    return nullptr;

  // Make sure this is the only use of Reg.
  if (!MRI->hasOneNonDBGUse(Reg))
    return nullptr;

  MachineInstr &DefMI = *MRI->getVRegDef(Reg);

  // Make sure the def is a MOV from memory.
  // If the def is in another block, give up.
  if ((DefMI.getOpcode() != X86::MOV32rm &&
       DefMI.getOpcode() != X86::MOV64rm) ||
      DefMI.getParent() != FrameSetup->getParent())
    return nullptr;

  // Make sure we don't have any instructions between DefMI and the
  // push that make folding the load illegal.
  for (MachineBasicBlock::iterator I = DefMI; I != FrameSetup; ++I)
    if (I->isLoadFoldBarrier())
      return nullptr;

  return &DefMI;
}

FunctionPass *llvm::createX86CallFrameOptimization() {
  return new X86CallFrameOptimization();
}