1 //===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee saved registers, and for emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
16 //===----------------------------------------------------------------------===//
17
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/BitVector.h"
20 #include "llvm/ADT/DepthFirstIterator.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineDominators.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineFunctionPass.h"
33 #include "llvm/CodeGen/MachineInstr.h"
34 #include "llvm/CodeGen/MachineInstrBuilder.h"
35 #include "llvm/CodeGen/MachineLoopInfo.h"
36 #include "llvm/CodeGen/MachineModuleInfo.h"
37 #include "llvm/CodeGen/MachineOperand.h"
38 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
39 #include "llvm/CodeGen/MachineRegisterInfo.h"
40 #include "llvm/CodeGen/RegisterScavenging.h"
41 #include "llvm/CodeGen/TargetFrameLowering.h"
42 #include "llvm/CodeGen/TargetInstrInfo.h"
43 #include "llvm/CodeGen/TargetOpcodes.h"
44 #include "llvm/CodeGen/TargetRegisterInfo.h"
45 #include "llvm/CodeGen/TargetSubtargetInfo.h"
46 #include "llvm/CodeGen/WinEHFuncInfo.h"
47 #include "llvm/IR/Attributes.h"
48 #include "llvm/IR/CallingConv.h"
49 #include "llvm/IR/DebugInfoMetadata.h"
50 #include "llvm/IR/DiagnosticInfo.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/InlineAsm.h"
53 #include "llvm/IR/LLVMContext.h"
54 #include "llvm/InitializePasses.h"
55 #include "llvm/MC/MCRegisterInfo.h"
56 #include "llvm/Pass.h"
57 #include "llvm/Support/CodeGen.h"
58 #include "llvm/Support/Debug.h"
59 #include "llvm/Support/ErrorHandling.h"
60 #include "llvm/Support/raw_ostream.h"
61 #include "llvm/Target/TargetMachine.h"
62 #include "llvm/Target/TargetOptions.h"
63 #include <algorithm>
64 #include <cassert>
65 #include <cstdint>
66 #include <functional>
67 #include <limits>
68 #include <utility>
69 #include <vector>
70
71 using namespace llvm;
72
73 #define DEBUG_TYPE "prologepilog"
74
75 using MBBVector = SmallVector<MachineBasicBlock *, 4>;
76
77 STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs");
78 STATISTIC(NumFuncSeen, "Number of functions seen in PEI");
79
80
81 namespace {
82
83 class PEI : public MachineFunctionPass {
84 public:
85 static char ID;
86
  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }
90
91 void getAnalysisUsage(AnalysisUsage &AU) const override;
92
93 /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
94 /// frame indexes with appropriate references.
95 bool runOnMachineFunction(MachineFunction &MF) override;
96
97 private:
98 RegScavenger *RS;
99
100 // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
101 // stack frame indexes.
102 unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
103 unsigned MaxCSFrameIndex = 0;
104
105 // Save and Restore blocks of the current function. Typically there is a
106 // single save block, unless Windows EH funclets are involved.
107 MBBVector SaveBlocks;
108 MBBVector RestoreBlocks;
109
110 // Flag to control whether to use the register scavenger to resolve
111 // frame index materialization registers. Set according to
112 // TRI->requiresFrameIndexScavenging() for the current function.
113 bool FrameIndexVirtualScavenging;
114
115 // Flag to control whether the scavenger should be passed even though
116 // FrameIndexVirtualScavenging is used.
117 bool FrameIndexEliminationScavenging;
118
119 // Emit remarks.
120 MachineOptimizationRemarkEmitter *ORE = nullptr;
121
122 void calculateCallFrameInfo(MachineFunction &MF);
123 void calculateSaveRestoreBlocks(MachineFunction &MF);
124 void spillCalleeSavedRegs(MachineFunction &MF);
125
126 void calculateFrameObjectOffsets(MachineFunction &MF);
127 void replaceFrameIndices(MachineFunction &MF);
128 void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
129 int &SPAdj);
130 void insertPrologEpilogCode(MachineFunction &MF);
131 void insertZeroCallUsedRegs(MachineFunction &MF);
132 };
133
134 } // end anonymous namespace
135
136 char PEI::ID = 0;
137
138 char &llvm::PrologEpilogCodeInserterID = PEI::ID;
139
140 INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
141 false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
143 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
144 INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
145 INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
146 "Prologue/Epilogue Insertion & Frame Finalization", false,
147 false)
148
149 MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
150 return new PEI();
151 }
152
153 STATISTIC(NumBytesStackSpace,
154 "Number of bytes used for stack in all functions");
155
void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
157 AU.setPreservesCFG();
158 AU.addPreserved<MachineLoopInfo>();
159 AU.addPreserved<MachineDominatorTree>();
160 AU.addRequired<MachineOptimizationRemarkEmitterPass>();
161 MachineFunctionPass::getAnalysisUsage(AU);
162 }
163
164 /// StackObjSet - A set of stack object indexes
165 using StackObjSet = SmallSetVector<int, 8>;
166
167 using SavedDbgValuesMap =
168 SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>;
169
170 /// Stash DBG_VALUEs that describe parameters and which are placed at the start
171 /// of the block. Later on, after the prologue code has been emitted, the
172 /// stashed DBG_VALUEs will be reinserted at the start of the block.
static void stashEntryDbgValues(MachineBasicBlock &MBB,
                                SavedDbgValuesMap &EntryDbgValues) {
175 SmallVector<const MachineInstr *, 4> FrameIndexValues;
176
177 for (auto &MI : MBB) {
178 if (!MI.isDebugInstr())
179 break;
180 if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter())
181 continue;
182 if (any_of(MI.debug_operands(),
183 [](const MachineOperand &MO) { return MO.isFI(); })) {
      // We can only emit valid locations for frame indices after the frame
      // setup, so do not stash them away.
186 FrameIndexValues.push_back(&MI);
187 continue;
188 }
189 const DILocalVariable *Var = MI.getDebugVariable();
190 const DIExpression *Expr = MI.getDebugExpression();
191 auto Overlaps = [Var, Expr](const MachineInstr *DV) {
192 return Var == DV->getDebugVariable() &&
193 Expr->fragmentsOverlap(DV->getDebugExpression());
194 };
195 // See if the debug value overlaps with any preceding debug value that will
196 // not be stashed. If that is the case, then we can't stash this value, as
197 // we would then reorder the values at reinsertion.
198 if (llvm::none_of(FrameIndexValues, Overlaps))
199 EntryDbgValues[&MBB].push_back(&MI);
200 }
201
202 // Remove stashed debug values from the block.
203 if (EntryDbgValues.count(&MBB))
204 for (auto *MI : EntryDbgValues[&MBB])
205 MI->removeFromParent();
206 }
207
208 /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
209 /// frame indexes with appropriate references.
bool PEI::runOnMachineFunction(MachineFunction &MF) {
211 NumFuncSeen++;
212 const Function &F = MF.getFunction();
213 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
214 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
215
216 RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
217 FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
218 ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
219
220 // Calculate the MaxCallFrameSize and AdjustsStack variables for the
221 // function's frame information. Also eliminates call frame pseudo
222 // instructions.
223 calculateCallFrameInfo(MF);
224
225 // Determine placement of CSR spill/restore code and prolog/epilog code:
226 // place all spills in the entry block, all restores in return blocks.
227 calculateSaveRestoreBlocks(MF);
228
229 // Stash away DBG_VALUEs that should not be moved by insertion of prolog code.
230 SavedDbgValuesMap EntryDbgValues;
231 for (MachineBasicBlock *SaveBlock : SaveBlocks)
232 stashEntryDbgValues(*SaveBlock, EntryDbgValues);
233
234 // Handle CSR spilling and restoring, for targets that need it.
235 if (MF.getTarget().usesPhysRegsForValues())
236 spillCalleeSavedRegs(MF);
237
238 // Allow the target machine to make final modifications to the function
239 // before the frame layout is finalized.
240 TFI->processFunctionBeforeFrameFinalized(MF, RS);
241
242 // Calculate actual frame offsets for all abstract stack objects...
243 calculateFrameObjectOffsets(MF);
244
245 // Add prolog and epilog code to the function. This function is required
246 // to align the stack frame as necessary for any stack variables or
247 // called functions. Because of this, calculateCalleeSavedRegisters()
248 // must be called before this function in order to set the AdjustsStack
249 // and MaxCallFrameSize variables.
250 if (!F.hasFnAttribute(Attribute::Naked))
251 insertPrologEpilogCode(MF);
252
253 // Reinsert stashed debug values at the start of the entry blocks.
254 for (auto &I : EntryDbgValues)
255 I.first->insert(I.first->begin(), I.second.begin(), I.second.end());
256
257 // Allow the target machine to make final modifications to the function
258 // before the frame layout is finalized.
259 TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS);
260
261 // Replace all MO_FrameIndex operands with physical register references
262 // and actual offsets.
263 //
264 replaceFrameIndices(MF);
265
266 // If register scavenging is needed, as we've enabled doing it as a
267 // post-pass, scavenge the virtual registers that frame index elimination
268 // inserted.
269 if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging)
270 scavengeFrameVirtualRegs(MF, *RS);
271
  // Warn on stack size when we exceed the given limit.
273 MachineFrameInfo &MFI = MF.getFrameInfo();
274 uint64_t StackSize = MFI.getStackSize();
275
276 unsigned Threshold = UINT_MAX;
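  // The "warn-stack-size" attribute carries the threshold in bytes as a
  // decimal string, e.g. "warn-stack-size"="512" on the function in IR.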
277 if (MF.getFunction().hasFnAttribute("warn-stack-size")) {
278 bool Failed = MF.getFunction()
279 .getFnAttribute("warn-stack-size")
280 .getValueAsString()
281 .getAsInteger(10, Threshold);
282 // Verifier should have caught this.
283 assert(!Failed && "Invalid warn-stack-size fn attr value");
284 (void)Failed;
285 }
286 if (MF.getFunction().hasFnAttribute(Attribute::SafeStack)) {
287 StackSize += MFI.getUnsafeStackSize();
288 }
289 if (StackSize > Threshold) {
290 DiagnosticInfoStackSize DiagStackSize(F, StackSize, Threshold, DS_Warning);
291 F.getContext().diagnose(DiagStackSize);
292 }
293 ORE->emit([&]() {
294 return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
295 MF.getFunction().getSubprogram(),
296 &MF.front())
297 << ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
298 });
299
300 delete RS;
301 SaveBlocks.clear();
302 RestoreBlocks.clear();
303 MFI.setSavePoint(nullptr);
304 MFI.setRestorePoint(nullptr);
305 return true;
306 }
307
308 /// Calculate the MaxCallFrameSize and AdjustsStack
309 /// variables for the function's frame information and eliminate call frame
310 /// pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &MF) {
312 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
313 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
314 MachineFrameInfo &MFI = MF.getFrameInfo();
315
316 unsigned MaxCallFrameSize = 0;
317 bool AdjustsStack = MFI.adjustsStack();
318
319 // Get the function call frame set-up and tear-down instruction opcode
320 unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
321 unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
322
323 // Early exit for targets which have no call frame setup/destroy pseudo
324 // instructions.
325 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
326 return;
327
328 std::vector<MachineBasicBlock::iterator> FrameSDOps;
329 for (MachineBasicBlock &BB : MF)
330 for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I)
331 if (TII.isFrameInstr(*I)) {
332 unsigned Size = TII.getFrameSize(*I);
333 if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
334 AdjustsStack = true;
335 FrameSDOps.push_back(I);
336 } else if (I->isInlineAsm()) {
337 // Some inline asm's need a stack frame, as indicated by operand 1.
338 unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
339 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
340 AdjustsStack = true;
341 }
342
343 assert(!MFI.isMaxCallFrameSizeComputed() ||
344 (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
345 MFI.adjustsStack() == AdjustsStack));
346 MFI.setAdjustsStack(AdjustsStack);
347 MFI.setMaxCallFrameSize(MaxCallFrameSize);
348
349 for (MachineBasicBlock::iterator I : FrameSDOps) {
350 // If call frames are not being included as part of the stack frame, and
351 // the target doesn't indicate otherwise, remove the call frame pseudos
352 // here. The sub/add sp instruction pairs are still inserted, but we don't
353 // need to track the SP adjustment for frame index elimination.
354 if (TFI->canSimplifyCallFramePseudos(MF))
355 TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I);
356 }
357 }
358
359 /// Compute the sets of entry and return blocks for saving and restoring
360 /// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
362 const MachineFrameInfo &MFI = MF.getFrameInfo();
363
364 // Even when we do not change any CSR, we still want to insert the
365 // prologue and epilogue of the function.
366 // So set the save points for those.
367
368 // Use the points found by shrink-wrapping, if any.
369 if (MFI.getSavePoint()) {
370 SaveBlocks.push_back(MFI.getSavePoint());
371 assert(MFI.getRestorePoint() && "Both restore and save must be set");
372 MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
373 // If RestoreBlock does not have any successor and is not a return block
374 // then the end point is unreachable and we do not need to insert any
375 // epilogue.
376 if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
377 RestoreBlocks.push_back(RestoreBlock);
378 return;
379 }
380
381 // Save refs to entry and return blocks.
382 SaveBlocks.push_back(&MF.front());
383 for (MachineBasicBlock &MBB : MF) {
384 if (MBB.isEHFuncletEntry())
385 SaveBlocks.push_back(&MBB);
386 if (MBB.isReturnBlock())
387 RestoreBlocks.push_back(&MBB);
388 }
389 }
390
static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
395 if (SavedRegs.empty())
396 return;
397
398 const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
399 const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();
400 BitVector CSMask(SavedRegs.size());
401
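  // CSMask marks the registers that appear in the target's callee-saved list,
  // so below we can distinguish real CSRs from aliases that only show up in
  // SavedRegs.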
402 for (unsigned i = 0; CSRegs[i]; ++i)
403 CSMask.set(CSRegs[i]);
404
405 std::vector<CalleeSavedInfo> CSI;
406 for (unsigned i = 0; CSRegs[i]; ++i) {
407 unsigned Reg = CSRegs[i];
408 if (SavedRegs.test(Reg)) {
409 bool SavedSuper = false;
410 for (const MCPhysReg &SuperReg : RegInfo->superregs(Reg)) {
411 // Some backends set all aliases for some registers as saved, such as
412 // Mips's $fp, so they appear in SavedRegs but not CSRegs.
413 if (SavedRegs.test(SuperReg) && CSMask.test(SuperReg)) {
414 SavedSuper = true;
415 break;
416 }
417 }
418
419 if (!SavedSuper)
420 CSI.push_back(CalleeSavedInfo(Reg));
421 }
422 }
423
424 const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
425 MachineFrameInfo &MFI = F.getFrameInfo();
426 if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex,
427 MaxCSFrameIndex)) {
428 // If target doesn't implement this, use generic code.
429
430 if (CSI.empty())
431 return; // Early exit if no callee saved registers are modified!
432
433 unsigned NumFixedSpillSlots;
434 const TargetFrameLowering::SpillSlot *FixedSpillSlots =
435 TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);
436
437 // Now that we know which registers need to be saved and restored, allocate
438 // stack slots for them.
439 for (auto &CS : CSI) {
440 // If the target has spilled this register to another register, we don't
441 // need to allocate a stack slot.
442 if (CS.isSpilledToReg())
443 continue;
444
445 unsigned Reg = CS.getReg();
446 const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
447
448 int FrameIdx;
449 if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
450 CS.setFrameIdx(FrameIdx);
451 continue;
452 }
453
454 // Check to see if this physreg must be spilled to a particular stack slot
455 // on this target.
456 const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
457 while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
458 FixedSlot->Reg != Reg)
459 ++FixedSlot;
460
461 unsigned Size = RegInfo->getSpillSize(*RC);
462 if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
463 // Nope, just spill it anywhere convenient.
464 Align Alignment = RegInfo->getSpillAlign(*RC);
465 // We may not be able to satisfy the desired alignment specification of
466 // the TargetRegisterClass if the stack alignment is smaller. Use the
467 // min.
468 Alignment = std::min(Alignment, TFI->getStackAlign());
469 FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
470 if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
471 if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
472 } else {
473 // Spill it to the stack where we must.
474 FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
475 }
476
477 CS.setFrameIdx(FrameIdx);
478 }
479 }
480
481 MFI.setCalleeSavedInfo(CSI);
482 }
483
484 /// Helper function to update the liveness information for the callee-saved
485 /// registers.
static void updateLiveness(MachineFunction &MF) {
487 MachineFrameInfo &MFI = MF.getFrameInfo();
488 // Visited will contain all the basic blocks that are in the region
489 // where the callee saved registers are alive:
490 // - Anything that is not Save or Restore -> LiveThrough.
491 // - Save -> LiveIn.
492 // - Restore -> LiveOut.
493 // The live-out is not attached to the block, so no need to keep
494 // Restore in this set.
495 SmallPtrSet<MachineBasicBlock *, 8> Visited;
496 SmallVector<MachineBasicBlock *, 8> WorkList;
497 MachineBasicBlock *Entry = &MF.front();
498 MachineBasicBlock *Save = MFI.getSavePoint();
499
500 if (!Save)
501 Save = Entry;
502
503 if (Entry != Save) {
504 WorkList.push_back(Entry);
505 Visited.insert(Entry);
506 }
507 Visited.insert(Save);
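  // Save is inserted into Visited without being enqueued, so the walk from
  // Entry stops at the save point and only reaches blocks that execute before
  // the prologue.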
508
509 MachineBasicBlock *Restore = MFI.getRestorePoint();
510 if (Restore)
511 // By construction Restore cannot be visited, otherwise it
512 // means there exists a path to Restore that does not go
513 // through Save.
514 WorkList.push_back(Restore);
515
516 while (!WorkList.empty()) {
517 const MachineBasicBlock *CurBB = WorkList.pop_back_val();
518 // By construction, the region that is after the save point is
519 // dominated by the Save and post-dominated by the Restore.
520 if (CurBB == Save && Save != Restore)
521 continue;
522 // Enqueue all the successors not already visited.
523 // Those are by construction either before Save or after Restore.
524 for (MachineBasicBlock *SuccBB : CurBB->successors())
525 if (Visited.insert(SuccBB).second)
526 WorkList.push_back(SuccBB);
527 }
528
529 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
530
531 MachineRegisterInfo &MRI = MF.getRegInfo();
532 for (const CalleeSavedInfo &I : CSI) {
533 for (MachineBasicBlock *MBB : Visited) {
534 MCPhysReg Reg = I.getReg();
535 // Add the callee-saved register as live-in.
536 // It's killed at the spill.
537 if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
538 MBB->addLiveIn(Reg);
539 }
540 // If callee-saved register is spilled to another register rather than
541 // spilling to stack, the destination register has to be marked as live for
542 // each MBB between the prologue and epilogue so that it is not clobbered
543 // before it is reloaded in the epilogue. The Visited set contains all
544 // blocks outside of the region delimited by prologue/epilogue.
545 if (I.isSpilledToReg()) {
546 for (MachineBasicBlock &MBB : MF) {
547 if (Visited.count(&MBB))
548 continue;
549 MCPhysReg DstReg = I.getDstReg();
550 if (!MBB.isLiveIn(DstReg))
551 MBB.addLiveIn(DstReg);
552 }
553 }
554 }
555 }
556
557 /// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI) {
560 MachineFunction &MF = *SaveBlock.getParent();
561 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
562 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
563 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
564
565 MachineBasicBlock::iterator I = SaveBlock.begin();
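  // Give the target a chance to emit its own spill sequence; if it declines,
  // fall back to a generic copy or store for each callee-saved register.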
566 if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
567 for (const CalleeSavedInfo &CS : CSI) {
568 // Insert the spill to the stack frame.
569 unsigned Reg = CS.getReg();
570
571 if (CS.isSpilledToReg()) {
572 BuildMI(SaveBlock, I, DebugLoc(),
573 TII.get(TargetOpcode::COPY), CS.getDstReg())
574 .addReg(Reg, getKillRegState(true));
575 } else {
576 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
577 TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
578 TRI);
579 }
580 }
581 }
582 }
583
584 /// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              std::vector<CalleeSavedInfo> &CSI) {
587 MachineFunction &MF = *RestoreBlock.getParent();
588 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
589 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
590 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
591
592 // Restore all registers immediately before the return and any
593 // terminators that precede it.
594 MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();
595
596 if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
597 for (const CalleeSavedInfo &CI : reverse(CSI)) {
598 unsigned Reg = CI.getReg();
599 if (CI.isSpilledToReg()) {
600 BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg)
601 .addReg(CI.getDstReg(), getKillRegState(true));
602 } else {
603 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
604 TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC, TRI);
605 assert(I != RestoreBlock.begin() &&
606 "loadRegFromStackSlot didn't insert any code!");
607 // Insert in reverse order. loadRegFromStackSlot can insert
608 // multiple instructions.
609 }
610 }
611 }
612 }
613
void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
615 // We can't list this requirement in getRequiredProperties because some
616 // targets (WebAssembly) use virtual registers past this point, and the pass
617 // pipeline is set up without giving the passes a chance to look at the
618 // TargetMachine.
619 // FIXME: Find a way to express this in getRequiredProperties.
620 assert(MF.getProperties().hasProperty(
621 MachineFunctionProperties::Property::NoVRegs));
622
623 const Function &F = MF.getFunction();
624 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
625 MachineFrameInfo &MFI = MF.getFrameInfo();
626 MinCSFrameIndex = std::numeric_limits<unsigned>::max();
627 MaxCSFrameIndex = 0;
628
629 // Determine which of the registers in the callee save list should be saved.
630 BitVector SavedRegs;
631 TFI->determineCalleeSaves(MF, SavedRegs, RS);
632
633 // Assign stack slots for any callee-saved registers that must be spilled.
634 assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);
635
636 // Add the code to save and restore the callee saved registers.
637 if (!F.hasFnAttribute(Attribute::Naked)) {
638 MFI.setCalleeSavedInfoValid(true);
639
640 std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
641 if (!CSI.empty()) {
642 if (!MFI.hasCalls())
643 NumLeafFuncWithSpills++;
644
645 for (MachineBasicBlock *SaveBlock : SaveBlocks)
646 insertCSRSaves(*SaveBlock, CSI);
647
648 // Update the live-in information of all the blocks up to the save point.
649 updateLiveness(MF);
650
651 for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
652 insertCSRRestores(*RestoreBlock, CSI);
653 }
654 }
655 }
656
657 /// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, int64_t &Offset,
                                     Align &MaxAlign, unsigned Skew) {
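  // Offset is measured from the local area base in the direction of stack
  // growth, so for a downward-growing stack the object's final offset is the
  // negated value computed here.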
661 // If the stack grows down, add the object size to find the lowest address.
662 if (StackGrowsDown)
663 Offset += MFI.getObjectSize(FrameIdx);
664
665 Align Alignment = MFI.getObjectAlign(FrameIdx);
666
667 // If the alignment of this object is greater than that of the stack, then
668 // increase the stack alignment to match.
669 MaxAlign = std::max(MaxAlign, Alignment);
670
671 // Adjust to alignment boundary.
672 Offset = alignTo(Offset, Alignment, Skew);
673
674 if (StackGrowsDown) {
675 LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
676 << "]\n");
677 MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
678 } else {
679 LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
680 << "]\n");
681 MFI.setObjectOffset(FrameIdx, Offset);
682 Offset += MFI.getObjectSize(FrameIdx);
683 }
684 }
685
686 /// Compute which bytes of fixed and callee-save stack area are unused and keep
687 /// track of them in StackBytesFree.
688 static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
692 // Avoid undefined int64_t -> int conversion below in extreme case.
693 if (FixedCSEnd > std::numeric_limits<int>::max())
694 return;
695
696 StackBytesFree.resize(FixedCSEnd, true);
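  // Bit i of StackBytesFree corresponds to byte i of the fixed and
  // callee-save stack area; start with every byte marked free and clear the
  // ranges covered by already-allocated objects below.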
697
698 SmallVector<int, 16> AllocatedFrameSlots;
699 // Add fixed objects.
700 for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
701 // StackSlot scavenging is only implemented for the default stack.
702 if (MFI.getStackID(i) == TargetStackID::Default)
703 AllocatedFrameSlots.push_back(i);
704 // Add callee-save objects if there are any.
705 if (MinCSFrameIndex <= MaxCSFrameIndex) {
706 for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
707 if (MFI.getStackID(i) == TargetStackID::Default)
708 AllocatedFrameSlots.push_back(i);
709 }
710
711 for (int i : AllocatedFrameSlots) {
712 // These are converted from int64_t, but they should always fit in int
713 // because of the FixedCSEnd check above.
714 int ObjOffset = MFI.getObjectOffset(i);
715 int ObjSize = MFI.getObjectSize(i);
716 int ObjStart, ObjEnd;
717 if (StackGrowsDown) {
718 // ObjOffset is negative when StackGrowsDown is true.
719 ObjStart = -ObjOffset - ObjSize;
720 ObjEnd = -ObjOffset;
721 } else {
722 ObjStart = ObjOffset;
723 ObjEnd = ObjOffset + ObjSize;
724 }
725 // Ignore fixed holes that are in the previous stack frame.
726 if (ObjEnd > 0)
727 StackBytesFree.reset(ObjStart, ObjEnd);
728 }
729 }
730
731 /// Assign frame object to an unused portion of the stack in the fixed stack
732 /// object range. Return true if the allocation was successful.
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, Align MaxAlign,
                                     BitVector &StackBytesFree) {
736 if (MFI.isVariableSizedObjectIndex(FrameIdx))
737 return false;
738
739 if (StackBytesFree.none()) {
740 // clear it to speed up later scavengeStackSlot calls to
741 // StackBytesFree.none()
742 StackBytesFree.clear();
743 return false;
744 }
745
746 Align ObjAlign = MFI.getObjectAlign(FrameIdx);
747 if (ObjAlign > MaxAlign)
748 return false;
749
750 int64_t ObjSize = MFI.getObjectSize(FrameIdx);
751 int FreeStart;
752 for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
753 FreeStart = StackBytesFree.find_next(FreeStart)) {
754
755 // Check that free space has suitable alignment.
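    // On a downward-growing stack the object's address is -(FreeStart +
    // ObjSize), so the alignment check is done on FreeStart + ObjSize.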
756 unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
757 if (alignTo(ObjStart, ObjAlign) != ObjStart)
758 continue;
759
760 if (FreeStart + ObjSize > StackBytesFree.size())
761 return false;
762
763 bool AllBytesFree = true;
764 for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
765 if (!StackBytesFree.test(FreeStart + Byte)) {
766 AllBytesFree = false;
767 break;
768 }
769 if (AllBytesFree)
770 break;
771 }
772
773 if (FreeStart == -1)
774 return false;
775
776 if (StackGrowsDown) {
777 int ObjStart = -(FreeStart + ObjSize);
778 LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
779 << ObjStart << "]\n");
780 MFI.setObjectOffset(FrameIdx, ObjStart);
781 } else {
782 LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
783 << FreeStart << "]\n");
784 MFI.setObjectOffset(FrameIdx, FreeStart);
785 }
786
787 StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
788 return true;
789 }
790
791 /// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
792 /// those required to be close to the Stack Protector) to stack offsets.
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo &MFI, bool StackGrowsDown,
                                  int64_t &Offset, Align &MaxAlign,
                                  unsigned Skew) {
798
799 for (int i : UnassignedObjs) {
800 AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
801 ProtectedObjs.insert(i);
802 }
803 }
804
805 /// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
806 /// abstract stack objects.
void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
808 const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
809
810 bool StackGrowsDown =
811 TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
812
813 // Loop over all of the stack objects, assigning sequential addresses...
814 MachineFrameInfo &MFI = MF.getFrameInfo();
815
816 // Start at the beginning of the local area.
817 // The Offset is the distance from the stack top in the direction
818 // of stack growth -- so it's always nonnegative.
819 int LocalAreaOffset = TFI.getOffsetOfLocalArea();
820 if (StackGrowsDown)
821 LocalAreaOffset = -LocalAreaOffset;
822 assert(LocalAreaOffset >= 0
823 && "Local area offset should be in direction of stack growth");
824 int64_t Offset = LocalAreaOffset;
825
826 // Skew to be applied to alignment.
827 unsigned Skew = TFI.getStackAlignmentSkew(MF);
828
829 #ifdef EXPENSIVE_CHECKS
830 for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
831 if (!MFI.isDeadObjectIndex(i) &&
832 MFI.getStackID(i) == TargetStackID::Default)
833 assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
834 "MaxAlignment is invalid");
835 #endif
836
837 // If there are fixed sized objects that are preallocated in the local area,
838 // non-fixed objects can't be allocated right at the start of local area.
839 // Adjust 'Offset' to point to the end of last fixed sized preallocated
840 // object.
841 for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
842 // Only allocate objects on the default stack.
843 if (MFI.getStackID(i) != TargetStackID::Default)
844 continue;
845
846 int64_t FixedOff;
847 if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at the lower address
      // of the object -- which is given by its offset. For a downward-growing
      // stack the offset is negative, so we negate it to get the distance.
851 FixedOff = -MFI.getObjectOffset(i);
852 } else {
853 // The maximum distance from the start pointer is at the upper
854 // address of the object.
855 FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
856 }
857 if (FixedOff > Offset) Offset = FixedOff;
858 }
859
860 Align MaxAlign = MFI.getMaxAlign();
861 // First assign frame offsets to stack objects that are used to spill
862 // callee saved registers.
863 if (MaxCSFrameIndex >= MinCSFrameIndex) {
864 for (unsigned i = 0; i <= MaxCSFrameIndex - MinCSFrameIndex; ++i) {
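      // Walk the CSR slots from MinCSFrameIndex upwards when the stack grows
      // down and from MaxCSFrameIndex downwards otherwise.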
865 unsigned FrameIndex =
866 StackGrowsDown ? MinCSFrameIndex + i : MaxCSFrameIndex - i;
867
868 // Only allocate objects on the default stack.
869 if (MFI.getStackID(FrameIndex) != TargetStackID::Default)
870 continue;
871
872 // TODO: should this just be if (MFI.isDeadObjectIndex(FrameIndex))
873 if (!StackGrowsDown && MFI.isDeadObjectIndex(FrameIndex))
874 continue;
875
876 AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign,
877 Skew);
878 }
879 }
880
881 assert(MaxAlign == MFI.getMaxAlign() &&
882 "MFI.getMaxAlign should already account for all callee-saved "
883 "registers without a fixed stack slot");
884
885 // FixedCSEnd is the stack offset to the end of the fixed and callee-save
886 // stack area.
887 int64_t FixedCSEnd = Offset;
888
889 // Make sure the special register scavenging spill slot is closest to the
890 // incoming stack pointer if a frame pointer is required and is closer
891 // to the incoming rather than the final stack pointer.
892 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
893 bool EarlyScavengingSlots = TFI.allocateScavengingFrameIndexesNearIncomingSP(MF);
894 if (RS && EarlyScavengingSlots) {
895 SmallVector<int, 2> SFIs;
896 RS->getScavengingFrameIndices(SFIs);
897 for (int SFI : SFIs)
898 AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
899 }
900
901 // FIXME: Once this is working, then enable flag will change to a target
902 // check for whether the frame is large enough to want to use virtual
903 // frame index registers. Functions which don't want/need this optimization
904 // will continue to use the existing code path.
905 if (MFI.getUseLocalStackAllocationBlock()) {
906 Align Alignment = MFI.getLocalFrameMaxAlign();
907
908 // Adjust to alignment boundary.
909 Offset = alignTo(Offset, Alignment, Skew);
910
911 LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
912
913 // Resolve offsets for objects in the local block.
914 for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
915 std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
916 int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
917 LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
918 << "]\n");
919 MFI.setObjectOffset(Entry.first, FIOffset);
920 }
921 // Allocate the local block
922 Offset += MFI.getLocalFrameSize();
923
924 MaxAlign = std::max(Alignment, MaxAlign);
925 }
926
927 // Retrieve the Exception Handler registration node.
928 int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
929 if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
930 EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;
931
932 // Make sure that the stack protector comes before the local variables on the
933 // stack.
934 SmallSet<int, 16> ProtectedObjs;
935 if (MFI.hasStackProtectorIndex()) {
936 int StackProtectorFI = MFI.getStackProtectorIndex();
937 StackObjSet LargeArrayObjs;
938 StackObjSet SmallArrayObjs;
939 StackObjSet AddrOfObjs;
940
941 // If we need a stack protector, we need to make sure that
942 // LocalStackSlotPass didn't already allocate a slot for it.
943 // If we are told to use the LocalStackAllocationBlock, the stack protector
944 // is expected to be already pre-allocated.
945 if (MFI.getStackID(StackProtectorFI) != TargetStackID::Default) {
946 // If the stack protector isn't on the default stack then it's up to the
947 // target to set the stack offset.
948 assert(MFI.getObjectOffset(StackProtectorFI) != 0 &&
949 "Offset of stack protector on non-default stack expected to be "
950 "already set.");
951 assert(!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()) &&
952 "Stack protector on non-default stack expected to not be "
953 "pre-allocated by LocalStackSlotPass.");
954 } else if (!MFI.getUseLocalStackAllocationBlock()) {
955 AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset, MaxAlign,
956 Skew);
957 } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
958 llvm_unreachable(
959 "Stack protector not pre-allocated by LocalStackSlotPass.");
960 }
961
962 // Assign large stack objects first.
963 for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
964 if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
965 continue;
966 if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
967 continue;
968 if (RS && RS->isScavengingFrameIndex((int)i))
969 continue;
970 if (MFI.isDeadObjectIndex(i))
971 continue;
972 if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
973 continue;
974 // Only allocate objects on the default stack.
975 if (MFI.getStackID(i) != TargetStackID::Default)
976 continue;
977
978 switch (MFI.getObjectSSPLayout(i)) {
979 case MachineFrameInfo::SSPLK_None:
980 continue;
981 case MachineFrameInfo::SSPLK_SmallArray:
982 SmallArrayObjs.insert(i);
983 continue;
984 case MachineFrameInfo::SSPLK_AddrOf:
985 AddrOfObjs.insert(i);
986 continue;
987 case MachineFrameInfo::SSPLK_LargeArray:
988 LargeArrayObjs.insert(i);
989 continue;
990 }
991 llvm_unreachable("Unexpected SSPLayoutKind.");
992 }
993
994 // We expect **all** the protected stack objects to be pre-allocated by
995 // LocalStackSlotPass. If it turns out that PEI still has to allocate some
996 // of them, we may end up messing up the expected order of the objects.
997 if (MFI.getUseLocalStackAllocationBlock() &&
998 !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
999 AddrOfObjs.empty()))
1000 llvm_unreachable("Found protected stack objects not pre-allocated by "
1001 "LocalStackSlotPass.");
1002
1003 AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
1004 Offset, MaxAlign, Skew);
1005 AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
1006 Offset, MaxAlign, Skew);
1007 AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
1008 Offset, MaxAlign, Skew);
1009 }
1010
1011 SmallVector<int, 8> ObjectsToAllocate;
1012
1013 // Then prepare to assign frame offsets to stack objects that are not used to
1014 // spill callee saved registers.
1015 for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
1016 if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
1017 continue;
1018 if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
1019 continue;
1020 if (RS && RS->isScavengingFrameIndex((int)i))
1021 continue;
1022 if (MFI.isDeadObjectIndex(i))
1023 continue;
1024 if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
1025 continue;
1026 if (ProtectedObjs.count(i))
1027 continue;
1028 // Only allocate objects on the default stack.
1029 if (MFI.getStackID(i) != TargetStackID::Default)
1030 continue;
1031
1032 // Add the objects that we need to allocate to our working set.
1033 ObjectsToAllocate.push_back(i);
1034 }
1035
1036 // Allocate the EH registration node first if one is present.
1037 if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
1038 AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
1039 MaxAlign, Skew);
1040
1041 // Give the targets a chance to order the objects the way they like it.
1042 if (MF.getTarget().getOptLevel() != CodeGenOpt::None &&
1043 MF.getTarget().Options.StackSymbolOrdering)
1044 TFI.orderFrameObjects(MF, ObjectsToAllocate);
1045
1046 // Keep track of which bytes in the fixed and callee-save range are used so we
1047 // can use the holes when allocating later stack objects. Only do this if
1048 // stack protector isn't being used and the target requests it and we're
1049 // optimizing.
1050 BitVector StackBytesFree;
1051 if (!ObjectsToAllocate.empty() &&
1052 MF.getTarget().getOptLevel() != CodeGenOpt::None &&
1053 MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
1054 computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
1055 FixedCSEnd, StackBytesFree);
1056
1057 // Now walk the objects and actually assign base offsets to them.
1058 for (auto &Object : ObjectsToAllocate)
1059 if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
1060 StackBytesFree))
1061 AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);
1062
1063 // Make sure the special register scavenging spill slot is closest to the
1064 // stack pointer.
1065 if (RS && !EarlyScavengingSlots) {
1066 SmallVector<int, 2> SFIs;
1067 RS->getScavengingFrameIndices(SFIs);
1068 for (int SFI : SFIs)
1069 AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
1070 }
1071
1072 if (!TFI.targetHandlesStackFrameRounding()) {
1073 // If we have reserved argument space for call sites in the function
1074 // immediately on entry to the current function, count it as part of the
1075 // overall stack size.
1076 if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
1077 Offset += MFI.getMaxCallFrameSize();
1078
1079 // Round up the size to a multiple of the alignment. If the function has
1080 // any calls or alloca's, align to the target's StackAlignment value to
1081 // ensure that the callee's frame or the alloca data is suitably aligned;
1082 // otherwise, for leaf functions, align to the TransientStackAlignment
1083 // value.
1084 Align StackAlign;
1085 if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
1086 (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
1087 StackAlign = TFI.getStackAlign();
1088 else
1089 StackAlign = TFI.getTransientStackAlign();
1090
1091 // If the frame pointer is eliminated, all frame offsets will be relative to
1092 // SP not FP. Align to MaxAlign so this works.
1093 StackAlign = std::max(StackAlign, MaxAlign);
1094 int64_t OffsetBeforeAlignment = Offset;
1095 Offset = alignTo(Offset, StackAlign, Skew);
1096
    // If we have increased the offset to fulfill the alignment constraints,
    // then the scavenging spill slots may become harder to reach from the
    // stack pointer, float them so they stay close.
1100 if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
1101 !EarlyScavengingSlots) {
1102 SmallVector<int, 2> SFIs;
1103 RS->getScavengingFrameIndices(SFIs);
1104 LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
1105 << "Adjusting emergency spill slots!\n";);
1106 int64_t Delta = Offset - OffsetBeforeAlignment;
1107 for (int SFI : SFIs) {
1108 LLVM_DEBUG(llvm::dbgs()
1109 << "Adjusting offset of emergency spill slot #" << SFI
1110 << " from " << MFI.getObjectOffset(SFI););
1111 MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
1112 LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
1113 }
1114 }
1115 }
1116
1117 // Update frame info to pretend that this is part of the stack...
1118 int64_t StackSize = Offset - LocalAreaOffset;
1119 MFI.setStackSize(StackSize);
1120 NumBytesStackSpace += StackSize;
1121 }
1122
1123 /// insertPrologEpilogCode - Scan the function for modified callee saved
1124 /// registers, insert spill code for these callee saved registers, then add
1125 /// prolog and epilog code to the function.
void PEI::insertPrologEpilogCode(MachineFunction &MF) {
1127 const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
1128
1129 // Add prologue to the function...
1130 for (MachineBasicBlock *SaveBlock : SaveBlocks)
1131 TFI.emitPrologue(MF, *SaveBlock);
1132
1133 // Add epilogue to restore the callee-save registers in each exiting block.
1134 for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
1135 TFI.emitEpilogue(MF, *RestoreBlock);
1136
1137 // Zero call used registers before restoring callee-saved registers.
1138 insertZeroCallUsedRegs(MF);
1139
1140 for (MachineBasicBlock *SaveBlock : SaveBlocks)
1141 TFI.inlineStackProbe(MF, *SaveBlock);
1142
1143 // Emit additional code that is required to support segmented stacks, if
1144 // we've been asked for it. This, when linked with a runtime with support
1145 // for segmented stacks (libgcc is one), will result in allocating stack
1146 // space in small chunks instead of one large contiguous block.
1147 if (MF.shouldSplitStack()) {
1148 for (MachineBasicBlock *SaveBlock : SaveBlocks)
1149 TFI.adjustForSegmentedStacks(MF, *SaveBlock);
1150 }
1151
1152 // Emit additional code that is required to explicitly handle the stack in
1153 // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
1154 // approach is rather similar to that of Segmented Stacks, but it uses a
1155 // different conditional check and another BIF for allocating more stack
1156 // space.
1157 if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
1158 for (MachineBasicBlock *SaveBlock : SaveBlocks)
1159 TFI.adjustForHiPEPrologue(MF, *SaveBlock);
1160 }
1161
1162 /// insertZeroCallUsedRegs - Zero out call used registers.
void PEI::insertZeroCallUsedRegs(MachineFunction &MF) {
1164 const Function &F = MF.getFunction();
1165
1166 if (!F.hasFnAttribute("zero-call-used-regs"))
1167 return;
1168
1169 using namespace ZeroCallUsedRegs;
1170
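  // The attribute value selects which call-clobbered registers to zero on
  // return, e.g. "used-gpr" only zeroes general-purpose registers that this
  // function actually used.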
1171 ZeroCallUsedRegsKind ZeroRegsKind =
1172 StringSwitch<ZeroCallUsedRegsKind>(
1173 F.getFnAttribute("zero-call-used-regs").getValueAsString())
1174 .Case("skip", ZeroCallUsedRegsKind::Skip)
1175 .Case("used-gpr-arg", ZeroCallUsedRegsKind::UsedGPRArg)
1176 .Case("used-gpr", ZeroCallUsedRegsKind::UsedGPR)
1177 .Case("used-arg", ZeroCallUsedRegsKind::UsedArg)
1178 .Case("used", ZeroCallUsedRegsKind::Used)
1179 .Case("all-gpr-arg", ZeroCallUsedRegsKind::AllGPRArg)
1180 .Case("all-gpr", ZeroCallUsedRegsKind::AllGPR)
1181 .Case("all-arg", ZeroCallUsedRegsKind::AllArg)
1182 .Case("all", ZeroCallUsedRegsKind::All);
1183
1184 if (ZeroRegsKind == ZeroCallUsedRegsKind::Skip)
1185 return;
1186
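  // ZeroCallUsedRegsKind acts as a bitmask here: the ONLY_GPR, ONLY_USED and
  // ONLY_ARG bits narrow the set of registers considered below.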
1187 const bool OnlyGPR = static_cast<unsigned>(ZeroRegsKind) & ONLY_GPR;
1188 const bool OnlyUsed = static_cast<unsigned>(ZeroRegsKind) & ONLY_USED;
1189 const bool OnlyArg = static_cast<unsigned>(ZeroRegsKind) & ONLY_ARG;
1190
1191 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
1192 const BitVector AllocatableSet(TRI.getAllocatableSet(MF));
1193
1194 // Mark all used registers.
1195 BitVector UsedRegs(TRI.getNumRegs());
1196 if (OnlyUsed)
1197 for (const MachineBasicBlock &MBB : MF)
1198 for (const MachineInstr &MI : MBB)
1199 for (const MachineOperand &MO : MI.operands()) {
1200 if (!MO.isReg())
1201 continue;
1202
1203 MCRegister Reg = MO.getReg();
1204 if (AllocatableSet[Reg] && !MO.isImplicit() &&
1205 (MO.isDef() || MO.isUse()))
1206 UsedRegs.set(Reg);
1207 }
1208
1209 BitVector RegsToZero(TRI.getNumRegs());
1210 for (MCRegister Reg : AllocatableSet.set_bits()) {
1211 // Skip over fixed registers.
1212 if (TRI.isFixedRegister(MF, Reg))
1213 continue;
1214
1215 // Want only general purpose registers.
1216 if (OnlyGPR && !TRI.isGeneralPurposeRegister(MF, Reg))
1217 continue;
1218
1219 // Want only used registers.
1220 if (OnlyUsed && !UsedRegs[Reg])
1221 continue;
1222
1223 // Want only registers used for arguments.
1224 if (OnlyArg && !TRI.isArgumentRegister(MF, Reg))
1225 continue;
1226
1227 RegsToZero.set(Reg);
1228 }
1229
1230 // Don't clear registers that are live when leaving the function.
1231 for (const MachineBasicBlock &MBB : MF)
1232 for (const MachineInstr &MI : MBB.terminators()) {
1233 if (!MI.isReturn())
1234 continue;
1235
1236 for (const auto &MO : MI.operands()) {
1237 if (!MO.isReg())
1238 continue;
1239
1240 MCRegister Reg = MO.getReg();
1241
        // This picks up sibling registers (e.g. %al -> %ah).
1243 for (MCRegUnitIterator Unit(Reg, &TRI); Unit.isValid(); ++Unit)
1244 RegsToZero.reset(*Unit);
1245
1246 for (MCPhysReg SReg : TRI.sub_and_superregs_inclusive(Reg))
1247 RegsToZero.reset(SReg);
1248 }
1249 }
1250
1251 // Don't need to clear registers that are used/clobbered by terminating
1252 // instructions.
1253 for (const MachineBasicBlock &MBB : MF) {
1254 if (!MBB.isReturnBlock())
1255 continue;
1256
1257 MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();
1258 for (MachineBasicBlock::const_iterator I = MBBI, E = MBB.end(); I != E;
1259 ++I) {
1260 for (const MachineOperand &MO : I->operands()) {
1261 if (!MO.isReg())
1262 continue;
1263
1264 for (const MCPhysReg &Reg :
1265 TRI.sub_and_superregs_inclusive(MO.getReg()))
1266 RegsToZero.reset(Reg);
1267 }
1268 }
1269 }
1270
1271 // Don't clear registers that must be preserved.
1272 for (const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
1273 MCPhysReg CSReg = *CSRegs; ++CSRegs)
1274 for (MCRegister Reg : TRI.sub_and_superregs_inclusive(CSReg))
1275 RegsToZero.reset(Reg);
1276
1277 const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
1278 for (MachineBasicBlock &MBB : MF)
1279 if (MBB.isReturnBlock())
1280 TFI.emitZeroCallUsedRegs(RegsToZero, MBB);
1281 }
1282
1283 /// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
1284 /// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
1286 const auto &ST = MF.getSubtarget();
1287 const TargetFrameLowering &TFI = *ST.getFrameLowering();
1288 if (!TFI.needsFrameIndexResolution(MF))
1289 return;
1290
1291 const TargetRegisterInfo *TRI = ST.getRegisterInfo();
1292
1293 // Allow the target to determine this after knowing the frame size.
1294 FrameIndexEliminationScavenging = (RS && !FrameIndexVirtualScavenging) ||
1295 TRI->requiresFrameIndexReplacementScavenging(MF);
1296
1297 // Store SPAdj at exit of a basic block.
1298 SmallVector<int, 8> SPState;
1299 SPState.resize(MF.getNumBlockIDs());
1300 df_iterator_default_set<MachineBasicBlock*> Reachable;
1301
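  // Visiting blocks in DFS order lets each block start from the SP adjustment
  // its DFS predecessor had at its exit, which matters when a call sequence
  // spans more than one basic block.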
1302 // Iterate over the reachable blocks in DFS order.
1303 for (auto DFI = df_ext_begin(&MF, Reachable), DFE = df_ext_end(&MF, Reachable);
1304 DFI != DFE; ++DFI) {
1305 int SPAdj = 0;
1306 // Check the exit state of the DFS stack predecessor.
1307 if (DFI.getPathLength() >= 2) {
1308 MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
1309 assert(Reachable.count(StackPred) &&
1310 "DFS stack predecessor is already visited.\n");
1311 SPAdj = SPState[StackPred->getNumber()];
1312 }
1313 MachineBasicBlock *BB = *DFI;
1314 replaceFrameIndices(BB, MF, SPAdj);
1315 SPState[BB->getNumber()] = SPAdj;
1316 }
1317
1318 // Handle the unreachable blocks.
1319 for (auto &BB : MF) {
1320 if (Reachable.count(&BB))
1321 // Already handled in DFS traversal.
1322 continue;
1323 int SPAdj = 0;
1324 replaceFrameIndices(&BB, MF, SPAdj);
1325 }
1326 }
1327
void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                              int &SPAdj) {
1330 assert(MF.getSubtarget().getRegisterInfo() &&
1331 "getRegisterInfo() must be implemented!");
1332 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1333 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
1334 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
1335
1336 if (RS && FrameIndexEliminationScavenging)
1337 RS->enterBasicBlock(*BB);
1338
1339 bool InsideCallSequence = false;
1340
1341 for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
1342 if (TII.isFrameInstr(*I)) {
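      // A frame setup pseudo opens a call sequence and a frame destroy pseudo
      // closes it, so InsideCallSequence tracks whether the instructions that
      // follow are part of a call sequence.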
1343 InsideCallSequence = TII.isFrameSetup(*I);
1344 SPAdj += TII.getSPAdjust(*I);
1345 I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
1346 continue;
1347 }
1348
1349 MachineInstr &MI = *I;
1350 bool DoIncr = true;
1351 bool DidFinishLoop = true;
1352 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1353 if (!MI.getOperand(i).isFI())
1354 continue;
1355
1356 // Frame indices in debug values are encoded in a target independent
1357 // way with simply the frame index and offset rather than any
1358 // target-specific addressing mode.
1359 if (MI.isDebugValue()) {
1360 MachineOperand &Op = MI.getOperand(i);
1361 assert(
1362 MI.isDebugOperand(&Op) &&
1363 "Frame indices can only appear as a debug operand in a DBG_VALUE*"
1364 " machine instruction");
1365 Register Reg;
1366 unsigned FrameIdx = Op.getIndex();
1367 unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);
1368
1369 StackOffset Offset =
1370 TFI->getFrameIndexReference(MF, FrameIdx, Reg);
1371 Op.ChangeToRegister(Reg, false /*isDef*/);
1372
1373 const DIExpression *DIExpr = MI.getDebugExpression();
1374
1375 // If we have a direct DBG_VALUE, and its location expression isn't
1376 // currently complex, then adding an offset will morph it into a
1377 // complex location that is interpreted as being a memory address.
1378 // This changes a pointer-valued variable to dereference that pointer,
1379 // which is incorrect. Fix by adding DW_OP_stack_value.
1380
1381 if (MI.isNonListDebugValue()) {
1382 unsigned PrependFlags = DIExpression::ApplyOffset;
1383 if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
1384 PrependFlags |= DIExpression::StackValue;
1385
          // If we have a DBG_VALUE that is indirect and has an implicit
          // location expression, we need to insert a deref before prepending
          // a memory location expression. After doing this we also change the
          // DBG_VALUE to be direct.
1390 if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
1391 SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
1392 bool WithStackValue = true;
1393 DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
1394 // Make the DBG_VALUE direct.
1395 MI.getDebugOffset().ChangeToRegister(0, false);
1396 }
1397 DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
1398 } else {
          // The debug operand at DebugOpIndex was a frame index at offset
          // `Offset`; now that the operand has been replaced with the frame
          // register, we must describe it as `register x, plus Offset` by
          // appending the offset opcodes to the expression.
1402 unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
1403 SmallVector<uint64_t, 3> Ops;
1404 TRI.getOffsetOpcodes(Offset, Ops);
1405 DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
1406 }
1407 MI.getDebugExpressionOp().setMetadata(DIExpr);
1408 continue;
1409 } else if (MI.isDebugPHI()) {
1410 // Allow stack ref to continue onwards.
1411 continue;
1412 }
1413
1414 // TODO: This code should be commoned with the code for
1415 // PATCHPOINT. There's no good reason for the difference in
1416 // implementation other than historical accident. The only
1417 // remaining difference is the unconditional use of the stack
1418 // pointer as the base register.
1419 if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
        assert((!MI.isDebugValue() || i == 0) &&
               "Frame indices can only appear as the first operand of a "
               "DBG_VALUE machine instruction");
1423 Register Reg;
1424 MachineOperand &Offset = MI.getOperand(i + 1);
1425 StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
1426 MF, MI.getOperand(i).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
1427 assert(!refOffset.getScalable() &&
1428 "Frame offsets with a scalable component are not supported");
1429 Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
1430 MI.getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
1431 continue;
1432 }
1433
1434 // Some instructions (e.g. inline asm instructions) can have
1435 // multiple frame indices and/or cause eliminateFrameIndex
1436 // to insert more than one instruction. We need the register
1437 // scavenger to go through all of these instructions so that
1438 // it can update its register information. We keep the
1439 // iterator at the point before insertion so that we can
1440 // revisit them in full.
1441 bool AtBeginning = (I == BB->begin());
1442 if (!AtBeginning) --I;
1443
1444 // If this instruction has a FrameIndex operand, we need to
1445 // use that target machine register info object to eliminate
1446 // it.
1447 TRI.eliminateFrameIndex(MI, SPAdj, i,
1448 FrameIndexEliminationScavenging ? RS : nullptr);
1449
1450 // Reset the iterator if we were at the beginning of the BB.
1451 if (AtBeginning) {
1452 I = BB->begin();
1453 DoIncr = false;
1454 }
1455
1456 DidFinishLoop = false;
1457 break;
1458 }
1459
1460 // If we are looking at a call sequence, we need to keep track of
1461 // the SP adjustment made by each instruction in the sequence.
1462 // This includes both the frame setup/destroy pseudos (handled above),
1463 // as well as other instructions that have side effects w.r.t the SP.
1464 // Note that this must come after eliminateFrameIndex, because
1465 // if I itself referred to a frame index, we shouldn't count its own
1466 // adjustment.
1467 if (DidFinishLoop && InsideCallSequence)
1468 SPAdj += TII.getSPAdjust(MI);
1469
1470 if (DoIncr && I != BB->end()) ++I;
1471
1472 // Update register states.
1473 if (RS && FrameIndexEliminationScavenging && DidFinishLoop)
1474 RS->forward(MI);
1475 }
1476 }
1477