1 //===-- PPCFrameLowering.cpp - PPC Frame Information ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the PPC implementation of TargetFrameLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PPCFrameLowering.h"
14 #include "MCTargetDesc/PPCPredicates.h"
15 #include "PPCInstrBuilder.h"
16 #include "PPCInstrInfo.h"
17 #include "PPCMachineFunctionInfo.h"
18 #include "PPCSubtarget.h"
19 #include "PPCTargetMachine.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/CodeGen/LivePhysRegs.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineModuleInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/RegisterScavenging.h"
28 #include "llvm/IR/Function.h"
29 #include "llvm/Target/TargetOptions.h"
30 
31 using namespace llvm;
32 
33 #define DEBUG_TYPE "framelowering"
34 STATISTIC(NumPESpillVSR, "Number of spills to vector in prologue");
35 STATISTIC(NumPEReloadVSR, "Number of reloads from vector in epilogue");
36 STATISTIC(NumPrologProbed, "Number of prologues probed");
37 
38 static cl::opt<bool>
39 EnablePEVectorSpills("ppc-enable-pe-vector-spills",
40                      cl::desc("Enable spills in prologue to vector registers."),
41                      cl::init(false), cl::Hidden);
42 
43 static unsigned computeReturnSaveOffset(const PPCSubtarget &STI) {
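  // The LR save slot lives in the caller's frame: immediately above the back
  // chain on 32-bit SVR4, and above the back chain and CR save word on AIX
  // and 64-bit targets.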
44   if (STI.isAIXABI())
45     return STI.isPPC64() ? 16 : 8;
46   // SVR4 ABI:
47   return STI.isPPC64() ? 16 : 4;
48 }
49 
50 static unsigned computeTOCSaveOffset(const PPCSubtarget &STI) {
51   if (STI.isAIXABI())
52     return STI.isPPC64() ? 40 : 20;
53   return STI.isELFv2ABI() ? 24 : 40;
54 }
55 
56 static unsigned computeFramePointerSaveOffset(const PPCSubtarget &STI) {
  // First slot in the general purpose register save area.
58   return STI.isPPC64() ? -8U : -4U;
59 }
60 
61 static unsigned computeLinkageSize(const PPCSubtarget &STI) {
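  // Concretely: ELFv2 reserves 4 doublewords (32 bytes), ELFv1 and 64-bit AIX
  // reserve 6 doublewords (48 bytes), and 32-bit AIX reserves 6 words
  // (24 bytes).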
62   if (STI.isAIXABI() || STI.isPPC64())
63     return (STI.isELFv2ABI() ? 4 : 6) * (STI.isPPC64() ? 8 : 4);
64 
65   // 32-bit SVR4 ABI:
66   return 8;
67 }
68 
69 static unsigned computeBasePointerSaveOffset(const PPCSubtarget &STI) {
70   // Third slot in the general purpose register save area.
71   if (STI.is32BitELFABI() && STI.getTargetMachine().isPositionIndependent())
72     return -12U;
73 
74   // Second slot in the general purpose register save area.
75   return STI.isPPC64() ? -16U : -8U;
76 }
77 
78 static unsigned computeCRSaveOffset(const PPCSubtarget &STI) {
79   return (STI.isAIXABI() && !STI.isPPC64()) ? 4 : 8;
80 }
81 
82 PPCFrameLowering::PPCFrameLowering(const PPCSubtarget &STI)
83     : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
84                           STI.getPlatformStackAlignment(), 0),
85       Subtarget(STI), ReturnSaveOffset(computeReturnSaveOffset(Subtarget)),
86       TOCSaveOffset(computeTOCSaveOffset(Subtarget)),
87       FramePointerSaveOffset(computeFramePointerSaveOffset(Subtarget)),
88       LinkageSize(computeLinkageSize(Subtarget)),
89       BasePointerSaveOffset(computeBasePointerSaveOffset(Subtarget)),
90       CRSaveOffset(computeCRSaveOffset(Subtarget)) {}
91 
92 // With the SVR4 ABI, callee-saved registers have fixed offsets on the stack.
93 const PPCFrameLowering::SpillSlot *PPCFrameLowering::getCalleeSavedSpillSlots(
94     unsigned &NumEntries) const {
95 
96 // Floating-point register save area offsets.
97 #define CALLEE_SAVED_FPRS \
98       {PPC::F31, -8},     \
99       {PPC::F30, -16},    \
100       {PPC::F29, -24},    \
101       {PPC::F28, -32},    \
102       {PPC::F27, -40},    \
103       {PPC::F26, -48},    \
104       {PPC::F25, -56},    \
105       {PPC::F24, -64},    \
106       {PPC::F23, -72},    \
107       {PPC::F22, -80},    \
108       {PPC::F21, -88},    \
109       {PPC::F20, -96},    \
110       {PPC::F19, -104},   \
111       {PPC::F18, -112},   \
112       {PPC::F17, -120},   \
113       {PPC::F16, -128},   \
114       {PPC::F15, -136},   \
115       {PPC::F14, -144}
116 
// 32-bit general purpose register save area offsets shared by ELF and
// AIX. AIX additionally treats r13 as a callee-saved register.
119 #define CALLEE_SAVED_GPRS32 \
120       {PPC::R31, -4},       \
121       {PPC::R30, -8},       \
122       {PPC::R29, -12},      \
123       {PPC::R28, -16},      \
124       {PPC::R27, -20},      \
125       {PPC::R26, -24},      \
126       {PPC::R25, -28},      \
127       {PPC::R24, -32},      \
128       {PPC::R23, -36},      \
129       {PPC::R22, -40},      \
130       {PPC::R21, -44},      \
131       {PPC::R20, -48},      \
132       {PPC::R19, -52},      \
133       {PPC::R18, -56},      \
134       {PPC::R17, -60},      \
135       {PPC::R16, -64},      \
136       {PPC::R15, -68},      \
137       {PPC::R14, -72}
138 
139 // 64-bit general purpose register save area offsets.
140 #define CALLEE_SAVED_GPRS64 \
141       {PPC::X31, -8},       \
142       {PPC::X30, -16},      \
143       {PPC::X29, -24},      \
144       {PPC::X28, -32},      \
145       {PPC::X27, -40},      \
146       {PPC::X26, -48},      \
147       {PPC::X25, -56},      \
148       {PPC::X24, -64},      \
149       {PPC::X23, -72},      \
150       {PPC::X22, -80},      \
151       {PPC::X21, -88},      \
152       {PPC::X20, -96},      \
153       {PPC::X19, -104},     \
154       {PPC::X18, -112},     \
155       {PPC::X17, -120},     \
156       {PPC::X16, -128},     \
157       {PPC::X15, -136},     \
158       {PPC::X14, -144}
159 
160 // Vector register save area offsets.
161 #define CALLEE_SAVED_VRS \
162       {PPC::V31, -16},   \
163       {PPC::V30, -32},   \
164       {PPC::V29, -48},   \
165       {PPC::V28, -64},   \
166       {PPC::V27, -80},   \
167       {PPC::V26, -96},   \
168       {PPC::V25, -112},  \
169       {PPC::V24, -128},  \
170       {PPC::V23, -144},  \
171       {PPC::V22, -160},  \
172       {PPC::V21, -176},  \
173       {PPC::V20, -192}
174 
175   // Note that the offsets here overlap, but this is fixed up in
176   // processFunctionBeforeFrameFinalized.
177 
178   static const SpillSlot ELFOffsets32[] = {
179       CALLEE_SAVED_FPRS,
180       CALLEE_SAVED_GPRS32,
181 
182       // CR save area offset.  We map each of the nonvolatile CR fields
183       // to the slot for CR2, which is the first of the nonvolatile CR
184       // fields to be assigned, so that we only allocate one save slot.
185       // See PPCRegisterInfo::hasReservedSpillSlot() for more information.
186       {PPC::CR2, -4},
187 
188       // VRSAVE save area offset.
189       {PPC::VRSAVE, -4},
190 
191       CALLEE_SAVED_VRS,
192 
193       // SPE register save area (overlaps Vector save area).
194       {PPC::S31, -8},
195       {PPC::S30, -16},
196       {PPC::S29, -24},
197       {PPC::S28, -32},
198       {PPC::S27, -40},
199       {PPC::S26, -48},
200       {PPC::S25, -56},
201       {PPC::S24, -64},
202       {PPC::S23, -72},
203       {PPC::S22, -80},
204       {PPC::S21, -88},
205       {PPC::S20, -96},
206       {PPC::S19, -104},
207       {PPC::S18, -112},
208       {PPC::S17, -120},
209       {PPC::S16, -128},
210       {PPC::S15, -136},
211       {PPC::S14, -144}};
212 
213   static const SpillSlot ELFOffsets64[] = {
214       CALLEE_SAVED_FPRS,
215       CALLEE_SAVED_GPRS64,
216 
217       // VRSAVE save area offset.
218       {PPC::VRSAVE, -4},
219       CALLEE_SAVED_VRS
220   };
221 
222   static const SpillSlot AIXOffsets32[] = {CALLEE_SAVED_FPRS,
223                                            CALLEE_SAVED_GPRS32,
224                                            // Add AIX's extra CSR.
225                                            {PPC::R13, -76},
226                                            CALLEE_SAVED_VRS};
227 
228   static const SpillSlot AIXOffsets64[] = {
229       CALLEE_SAVED_FPRS, CALLEE_SAVED_GPRS64, CALLEE_SAVED_VRS};
230 
231   if (Subtarget.is64BitELFABI()) {
232     NumEntries = array_lengthof(ELFOffsets64);
233     return ELFOffsets64;
234   }
235 
236   if (Subtarget.is32BitELFABI()) {
237     NumEntries = array_lengthof(ELFOffsets32);
238     return ELFOffsets32;
239   }
240 
241   assert(Subtarget.isAIXABI() && "Unexpected ABI.");
242 
243   if (Subtarget.isPPC64()) {
244     NumEntries = array_lengthof(AIXOffsets64);
245     return AIXOffsets64;
246   }
247 
248   NumEntries = array_lengthof(AIXOffsets32);
249   return AIXOffsets32;
250 }
251 
252 static bool spillsCR(const MachineFunction &MF) {
253   const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
254   return FuncInfo->isCRSpilled();
255 }
256 
257 static bool hasSpills(const MachineFunction &MF) {
258   const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
259   return FuncInfo->hasSpills();
260 }
261 
262 static bool hasNonRISpills(const MachineFunction &MF) {
263   const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
264   return FuncInfo->hasNonRISpills();
265 }
266 
267 /// MustSaveLR - Return true if this function requires that we save the LR
268 /// register onto the stack in the prolog and restore it in the epilog of the
269 /// function.
270 static bool MustSaveLR(const MachineFunction &MF, unsigned LR) {
271   const PPCFunctionInfo *MFI = MF.getInfo<PPCFunctionInfo>();
272 
273   // We need a save/restore of LR if there is any def of LR (which is
274   // defined by calls, including the PIC setup sequence), or if there is
275   // some use of the LR stack slot (e.g. for builtin_return_address).
276   // (LR comes in 32 and 64 bit versions.)
277   MachineRegisterInfo::def_iterator RI = MF.getRegInfo().def_begin(LR);
  return RI != MF.getRegInfo().def_end() || MFI->isLRStoreRequired();
279 }
280 
281 /// determineFrameLayoutAndUpdate - Determine the size of the frame and maximum
282 /// call frame size. Update the MachineFunction object with the stack size.
283 uint64_t
284 PPCFrameLowering::determineFrameLayoutAndUpdate(MachineFunction &MF,
285                                                 bool UseEstimate) const {
286   unsigned NewMaxCallFrameSize = 0;
287   uint64_t FrameSize = determineFrameLayout(MF, UseEstimate,
288                                             &NewMaxCallFrameSize);
289   MF.getFrameInfo().setStackSize(FrameSize);
290   MF.getFrameInfo().setMaxCallFrameSize(NewMaxCallFrameSize);
291   return FrameSize;
292 }
293 
294 /// determineFrameLayout - Determine the size of the frame and maximum call
295 /// frame size.
296 uint64_t
297 PPCFrameLowering::determineFrameLayout(const MachineFunction &MF,
298                                        bool UseEstimate,
299                                        unsigned *NewMaxCallFrameSize) const {
300   const MachineFrameInfo &MFI = MF.getFrameInfo();
301   const PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
302 
303   // Get the number of bytes to allocate from the FrameInfo
304   uint64_t FrameSize =
305     UseEstimate ? MFI.estimateStackSize(MF) : MFI.getStackSize();
306 
307   // Get stack alignments. The frame must be aligned to the greatest of these:
308   Align TargetAlign = getStackAlign(); // alignment required per the ABI
  Align MaxAlign = MFI.getMaxAlign();  // alignment required by data in frame
310   Align Alignment = std::max(TargetAlign, MaxAlign);
311 
312   const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
313 
314   unsigned LR = RegInfo->getRARegister();
315   bool DisableRedZone = MF.getFunction().hasFnAttribute(Attribute::NoRedZone);
316   bool CanUseRedZone = !MFI.hasVarSizedObjects() && // No dynamic alloca.
317                        !MFI.adjustsStack() &&       // No calls.
318                        !MustSaveLR(MF, LR) &&       // No need to save LR.
319                        !FI->mustSaveTOC() &&        // No need to save TOC.
320                        !RegInfo->hasBasePointer(MF); // No special alignment.
321 
322   // Note: for PPC32 SVR4ABI, we can still generate stackless
323   // code if all local vars are reg-allocated.
324   bool FitsInRedZone = FrameSize <= Subtarget.getRedZoneSize();
325 
326   // Check whether we can skip adjusting the stack pointer (by using red zone)
327   if (!DisableRedZone && CanUseRedZone && FitsInRedZone) {
328     // No need for frame
329     return 0;
330   }
331 
332   // Get the maximum call frame size of all the calls.
333   unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
334 
335   // Maximum call frame needs to be at least big enough for linkage area.
336   unsigned minCallFrameSize = getLinkageSize();
337   maxCallFrameSize = std::max(maxCallFrameSize, minCallFrameSize);
338 
339   // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
340   // that allocations will be aligned.
341   if (MFI.hasVarSizedObjects())
342     maxCallFrameSize = alignTo(maxCallFrameSize, Alignment);
343 
344   // Update the new max call frame size if the caller passes in a valid pointer.
345   if (NewMaxCallFrameSize)
346     *NewMaxCallFrameSize = maxCallFrameSize;
347 
348   // Include call frame size in total.
349   FrameSize += maxCallFrameSize;
350 
351   // Make sure the frame is aligned.
352   FrameSize = alignTo(FrameSize, Alignment);
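  // For illustration: on ELFv2 with 16-byte stack alignment, 40 bytes of
  // locals, and no outgoing-argument area beyond the 32-byte linkage area,
  // FrameSize = alignTo(40 + 32, 16) = 80 bytes.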
353 
354   return FrameSize;
355 }
356 
357 // hasFP - Return true if the specified function actually has a dedicated frame
358 // pointer register.
359 bool PPCFrameLowering::hasFP(const MachineFunction &MF) const {
360   const MachineFrameInfo &MFI = MF.getFrameInfo();
  // FIXME: This is pretty much broken by design: hasFP() might be called very
  // early, before the stack layout has been calculated, and thus might return
  // true or false depending on when it is called.
364   return (MFI.getStackSize()) && needsFP(MF);
365 }
366 
367 // needsFP - Return true if the specified function should have a dedicated frame
368 // pointer register.  This is true if the function has variable sized allocas or
369 // if frame pointer elimination is disabled.
370 bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
371   const MachineFrameInfo &MFI = MF.getFrameInfo();
372 
373   // Naked functions have no stack frame pushed, so we don't have a frame
374   // pointer.
375   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
376     return false;
377 
378   return MF.getTarget().Options.DisableFramePointerElim(MF) ||
379          MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint() ||
380          MF.exposesReturnsTwice() ||
381          (MF.getTarget().Options.GuaranteedTailCallOpt &&
382           MF.getInfo<PPCFunctionInfo>()->hasFastCall());
383 }
384 
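// Replace the FP/BP placeholder registers (PPC::FP, PPC::FP8, PPC::BP,
// PPC::BP8) in every instruction with the concrete registers selected for
// this function: R31/X31 (or R1/X1 when no frame pointer is needed) and the
// base-pointer register (X30 for the 64-bit placeholder).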
385 void PPCFrameLowering::replaceFPWithRealFP(MachineFunction &MF) const {
386   bool is31 = needsFP(MF);
387   unsigned FPReg  = is31 ? PPC::R31 : PPC::R1;
388   unsigned FP8Reg = is31 ? PPC::X31 : PPC::X1;
389 
390   const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
391   bool HasBP = RegInfo->hasBasePointer(MF);
392   unsigned BPReg  = HasBP ? (unsigned) RegInfo->getBaseRegister(MF) : FPReg;
393   unsigned BP8Reg = HasBP ? (unsigned) PPC::X30 : FP8Reg;
394 
395   for (MachineBasicBlock &MBB : MF)
396     for (MachineBasicBlock::iterator MBBI = MBB.end(); MBBI != MBB.begin();) {
397       --MBBI;
398       for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I) {
399         MachineOperand &MO = MBBI->getOperand(I);
400         if (!MO.isReg())
401           continue;
402 
403         switch (MO.getReg()) {
404         case PPC::FP:
405           MO.setReg(FPReg);
406           break;
407         case PPC::FP8:
408           MO.setReg(FP8Reg);
409           break;
410         case PPC::BP:
411           MO.setReg(BPReg);
412           break;
413         case PPC::BP8:
414           MO.setReg(BP8Reg);
415           break;
416 
417         }
418       }
419     }
420 }
421 
422 /*  This function will do the following:
423     - If MBB is an entry or exit block, set SR1 and SR2 to R0 and R12
424       respectively (defaults recommended by the ABI) and return true
425     - If MBB is not an entry block, initialize the register scavenger and look
426       for available registers.
427     - If the defaults (R0/R12) are available, return true
428     - If TwoUniqueRegsRequired is set to true, it looks for two unique
429       registers. Otherwise, look for a single available register.
430       - If the required registers are found, set SR1 and SR2 and return true.
431       - If the required registers are not found, set SR2 or both SR1 and SR2 to
432         PPC::NoRegister and return false.
433 
434     Note that if both SR1 and SR2 are valid parameters and TwoUniqueRegsRequired
435     is not set, this function will attempt to find two different registers, but
436     still return true if only one register is available (and set SR1 == SR2).
437 */
438 bool
439 PPCFrameLowering::findScratchRegister(MachineBasicBlock *MBB,
440                                       bool UseAtEnd,
441                                       bool TwoUniqueRegsRequired,
442                                       Register *SR1,
443                                       Register *SR2) const {
444   RegScavenger RS;
445   Register R0 =  Subtarget.isPPC64() ? PPC::X0 : PPC::R0;
446   Register R12 = Subtarget.isPPC64() ? PPC::X12 : PPC::R12;
447 
448   // Set the defaults for the two scratch registers.
449   if (SR1)
450     *SR1 = R0;
451 
452   if (SR2) {
453     assert (SR1 && "Asking for the second scratch register but not the first?");
454     *SR2 = R12;
455   }
456 
457   // If MBB is an entry or exit block, use R0 and R12 as the scratch registers.
458   if ((UseAtEnd && MBB->isReturnBlock()) ||
459       (!UseAtEnd && (&MBB->getParent()->front() == MBB)))
460     return true;
461 
462   RS.enterBasicBlock(*MBB);
463 
464   if (UseAtEnd && !MBB->empty()) {
    // The scratch register will be used at the end of the block, so we must
    // consider all registers used within the block.
467 
468     MachineBasicBlock::iterator MBBI = MBB->getFirstTerminator();
469     // If no terminator, back iterator up to previous instruction.
470     if (MBBI == MBB->end())
471       MBBI = std::prev(MBBI);
472 
473     if (MBBI != MBB->begin())
474       RS.forward(MBBI);
475   }
476 
477   // If the two registers are available, we're all good.
478   // Note that we only return here if both R0 and R12 are available because
479   // although the function may not require two unique registers, it may benefit
480   // from having two so we should try to provide them.
481   if (!RS.isRegUsed(R0) && !RS.isRegUsed(R12))
482     return true;
483 
484   // Get the list of callee-saved registers for the target.
485   const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
486   const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(MBB->getParent());
487 
488   // Get all the available registers in the block.
489   BitVector BV = RS.getRegsAvailable(Subtarget.isPPC64() ? &PPC::G8RCRegClass :
490                                      &PPC::GPRCRegClass);
491 
492   // We shouldn't use callee-saved registers as scratch registers as they may be
493   // available when looking for a candidate block for shrink wrapping but not
494   // available when the actual prologue/epilogue is being emitted because they
495   // were added as live-in to the prologue block by PrologueEpilogueInserter.
496   for (int i = 0; CSRegs[i]; ++i)
497     BV.reset(CSRegs[i]);
498 
499   // Set the first scratch register to the first available one.
500   if (SR1) {
501     int FirstScratchReg = BV.find_first();
502     *SR1 = FirstScratchReg == -1 ? (unsigned)PPC::NoRegister : FirstScratchReg;
503   }
504 
505   // If there is another one available, set the second scratch register to that.
506   // Otherwise, set it to either PPC::NoRegister if this function requires two
507   // or to whatever SR1 is set to if this function doesn't require two.
508   if (SR2) {
509     int SecondScratchReg = BV.find_next(*SR1);
510     if (SecondScratchReg != -1)
511       *SR2 = SecondScratchReg;
512     else
513       *SR2 = TwoUniqueRegsRequired ? Register() : *SR1;
514   }
515 
516   // Now that we've done our best to provide both registers, double check
517   // whether we were unable to provide enough.
518   if (BV.count() < (TwoUniqueRegsRequired ? 2U : 1U))
519     return false;
520 
521   return true;
522 }
523 
524 // We need a scratch register for spilling LR and for spilling CR. By default,
525 // we use two scratch registers to hide latency. However, if only one scratch
526 // register is available, we can adjust for that by not overlapping the spill
527 // code. However, if we need to realign the stack (i.e. have a base pointer)
528 // and the stack frame is large, we need two scratch registers.
// Stack probing also requires two scratch registers: one to hold the old SP
// and one to materialize large frame and probe sizes.
531 bool
532 PPCFrameLowering::twoUniqueScratchRegsRequired(MachineBasicBlock *MBB) const {
533   const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
534   MachineFunction &MF = *(MBB->getParent());
535   bool HasBP = RegInfo->hasBasePointer(MF);
536   unsigned FrameSize = determineFrameLayout(MF);
537   int NegFrameSize = -FrameSize;
538   bool IsLargeFrame = !isInt<16>(NegFrameSize);
539   MachineFrameInfo &MFI = MF.getFrameInfo();
540   Align MaxAlign = MFI.getMaxAlign();
541   bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI();
542   const PPCTargetLowering &TLI = *Subtarget.getTargetLowering();
543 
544   return ((IsLargeFrame || !HasRedZone) && HasBP && MaxAlign > 1) ||
545          TLI.hasInlineStackProbe(MF);
546 }
547 
548 bool PPCFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
549   MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
550 
551   return findScratchRegister(TmpMBB, false,
552                              twoUniqueScratchRegsRequired(TmpMBB));
553 }
554 
555 bool PPCFrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
556   MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
557 
558   return findScratchRegister(TmpMBB, true);
559 }
560 
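// Returns true if it is safe for emitPrologue to move the stack pointer
// update (the store-with-update instruction) below the callee-saved register
// spills.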
561 bool PPCFrameLowering::stackUpdateCanBeMoved(MachineFunction &MF) const {
562   const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
563   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
564 
565   // Abort if there is no register info or function info.
566   if (!RegInfo || !FI)
567     return false;
568 
569   // Only move the stack update on ELFv2 ABI and PPC64.
570   if (!Subtarget.isELFv2ABI() || !Subtarget.isPPC64())
571     return false;
572 
573   // Check the frame size first and return false if it does not fit the
574   // requirements.
575   // We need a non-zero frame size as well as a frame that will fit in the red
576   // zone. This is because by moving the stack pointer update we are now storing
577   // to the red zone until the stack pointer is updated. If we get an interrupt
578   // inside the prologue but before the stack update we now have a number of
579   // stores to the red zone and those stores must all fit.
580   MachineFrameInfo &MFI = MF.getFrameInfo();
581   unsigned FrameSize = MFI.getStackSize();
582   if (!FrameSize || FrameSize > Subtarget.getRedZoneSize())
583     return false;
584 
585   // Frame pointers and base pointers complicate matters so don't do anything
586   // if we have them. For example having a frame pointer will sometimes require
587   // a copy of r1 into r31 and that makes keeping track of updates to r1 more
588   // difficult. Similar situation exists with setjmp.
589   if (hasFP(MF) || RegInfo->hasBasePointer(MF) || MF.exposesReturnsTwice())
590     return false;
591 
592   // Calls to fast_cc functions use different rules for passing parameters on
593   // the stack from the ABI and using PIC base in the function imposes
594   // similar restrictions to using the base pointer. It is not generally safe
595   // to move the stack pointer update in these situations.
596   if (FI->hasFastCall() || FI->usesPICBase())
597     return false;
598 
599   // Finally we can move the stack update if we do not require register
600   // scavenging. Register scavenging can introduce more spills and so
601   // may make the frame size larger than we have computed.
602   return !RegInfo->requiresFrameIndexScavenging(MF);
603 }
604 
605 void PPCFrameLowering::emitPrologue(MachineFunction &MF,
606                                     MachineBasicBlock &MBB) const {
607   MachineBasicBlock::iterator MBBI = MBB.begin();
608   MachineFrameInfo &MFI = MF.getFrameInfo();
609   const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
610   const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
611   const PPCTargetLowering &TLI = *Subtarget.getTargetLowering();
612 
613   MachineModuleInfo &MMI = MF.getMMI();
614   const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
615   DebugLoc dl;
616   // AIX assembler does not support cfi directives.
617   const bool needsCFI = MF.needsFrameMoves() && !Subtarget.isAIXABI();
618 
619   // Get processor type.
620   bool isPPC64 = Subtarget.isPPC64();
621   // Get the ABI.
622   bool isSVR4ABI = Subtarget.isSVR4ABI();
623   bool isELFv2ABI = Subtarget.isELFv2ABI();
624   assert((isSVR4ABI || Subtarget.isAIXABI()) && "Unsupported PPC ABI.");
625 
626   // Work out frame sizes.
627   uint64_t FrameSize = determineFrameLayoutAndUpdate(MF);
628   int64_t NegFrameSize = -FrameSize;
629   if (!isInt<32>(FrameSize) || !isInt<32>(NegFrameSize))
630     llvm_unreachable("Unhandled stack size!");
631 
632   if (MFI.isFrameAddressTaken())
633     replaceFPWithRealFP(MF);
634 
635   // Check if the link register (LR) must be saved.
636   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
637   bool MustSaveLR = FI->mustSaveLR();
638   bool MustSaveTOC = FI->mustSaveTOC();
639   const SmallVectorImpl<Register> &MustSaveCRs = FI->getMustSaveCRs();
640   bool MustSaveCR = !MustSaveCRs.empty();
641   // Do we have a frame pointer and/or base pointer for this function?
642   bool HasFP = hasFP(MF);
643   bool HasBP = RegInfo->hasBasePointer(MF);
644   bool HasRedZone = isPPC64 || !isSVR4ABI;
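  // (Of the supported ABIs, only 32-bit SVR4 lacks a red zone.)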
645   bool HasROPProtect = Subtarget.hasROPProtect();
646   bool HasPrivileged = Subtarget.hasPrivileged();
647 
648   Register SPReg       = isPPC64 ? PPC::X1  : PPC::R1;
649   Register BPReg = RegInfo->getBaseRegister(MF);
650   Register FPReg       = isPPC64 ? PPC::X31 : PPC::R31;
651   Register LRReg       = isPPC64 ? PPC::LR8 : PPC::LR;
652   Register TOCReg      = isPPC64 ? PPC::X2 :  PPC::R2;
653   Register ScratchReg;
654   Register TempReg     = isPPC64 ? PPC::X12 : PPC::R12; // another scratch reg
655   //  ...(R12/X12 is volatile in both Darwin & SVR4, & can't be a function arg.)
656   const MCInstrDesc& MFLRInst = TII.get(isPPC64 ? PPC::MFLR8
657                                                 : PPC::MFLR );
658   const MCInstrDesc& StoreInst = TII.get(isPPC64 ? PPC::STD
659                                                  : PPC::STW );
660   const MCInstrDesc& StoreUpdtInst = TII.get(isPPC64 ? PPC::STDU
661                                                      : PPC::STWU );
662   const MCInstrDesc& StoreUpdtIdxInst = TII.get(isPPC64 ? PPC::STDUX
663                                                         : PPC::STWUX);
664   const MCInstrDesc& LoadImmShiftedInst = TII.get(isPPC64 ? PPC::LIS8
665                                                           : PPC::LIS );
666   const MCInstrDesc& OrImmInst = TII.get(isPPC64 ? PPC::ORI8
667                                                  : PPC::ORI );
668   const MCInstrDesc& OrInst = TII.get(isPPC64 ? PPC::OR8
669                                               : PPC::OR );
670   const MCInstrDesc& SubtractCarryingInst = TII.get(isPPC64 ? PPC::SUBFC8
671                                                             : PPC::SUBFC);
672   const MCInstrDesc& SubtractImmCarryingInst = TII.get(isPPC64 ? PPC::SUBFIC8
673                                                                : PPC::SUBFIC);
674   const MCInstrDesc &MoveFromCondRegInst = TII.get(isPPC64 ? PPC::MFCR8
675                                                            : PPC::MFCR);
676   const MCInstrDesc &StoreWordInst = TII.get(isPPC64 ? PPC::STW8 : PPC::STW);
677   const MCInstrDesc &HashST =
678       TII.get(isPPC64 ? (HasPrivileged ? PPC::HASHSTP8 : PPC::HASHST8)
679                       : (HasPrivileged ? PPC::HASHSTP : PPC::HASHST));
680 
681   // Regarding this assert: Even though LR is saved in the caller's frame (i.e.,
682   // LROffset is positive), that slot is callee-owned. Because PPC32 SVR4 has no
683   // Red Zone, an asynchronous event (a form of "callee") could claim a frame &
684   // overwrite it, so PPC32 SVR4 must claim at least a minimal frame to save LR.
685   assert((isPPC64 || !isSVR4ABI || !(!FrameSize && (MustSaveLR || HasFP))) &&
686          "FrameSize must be >0 to save/restore the FP or LR for 32-bit SVR4.");
687 
688   // Using the same bool variable as below to suppress compiler warnings.
689   bool SingleScratchReg = findScratchRegister(
690       &MBB, false, twoUniqueScratchRegsRequired(&MBB), &ScratchReg, &TempReg);
691   assert(SingleScratchReg &&
692          "Required number of registers not available in this block");
693 
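  // If only one distinct register could be found, ScratchReg and TempReg
  // alias; the CR and LR spills below must then be serialized through that
  // single register.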
694   SingleScratchReg = ScratchReg == TempReg;
695 
696   int64_t LROffset = getReturnSaveOffset();
697 
698   int64_t FPOffset = 0;
699   if (HasFP) {
700     MachineFrameInfo &MFI = MF.getFrameInfo();
701     int FPIndex = FI->getFramePointerSaveIndex();
702     assert(FPIndex && "No Frame Pointer Save Slot!");
703     FPOffset = MFI.getObjectOffset(FPIndex);
704   }
705 
706   int64_t BPOffset = 0;
707   if (HasBP) {
708     MachineFrameInfo &MFI = MF.getFrameInfo();
709     int BPIndex = FI->getBasePointerSaveIndex();
710     assert(BPIndex && "No Base Pointer Save Slot!");
711     BPOffset = MFI.getObjectOffset(BPIndex);
712   }
713 
714   int64_t PBPOffset = 0;
715   if (FI->usesPICBase()) {
716     MachineFrameInfo &MFI = MF.getFrameInfo();
717     int PBPIndex = FI->getPICBasePointerSaveIndex();
718     assert(PBPIndex && "No PIC Base Pointer Save Slot!");
719     PBPOffset = MFI.getObjectOffset(PBPIndex);
720   }
721 
722   // Get stack alignments.
723   Align MaxAlign = MFI.getMaxAlign();
724   if (HasBP && MaxAlign > 1)
725     assert(Log2(MaxAlign) < 16 && "Invalid alignment!");
726 
727   // Frames of 32KB & larger require special handling because they cannot be
728   // indexed into with a simple STDU/STWU/STD/STW immediate offset operand.
729   bool isLargeFrame = !isInt<16>(NegFrameSize);
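  // For such frames the prologue materializes the frame size with a lis/ori
  // pair and uses an indexed store-with-update (STDUX/STWUX) instead of an
  // immediate form.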
730 
731   // Check if we can move the stack update instruction (stdu) down the prologue
  // past the callee saves. Hopefully this avoids the situation where the saves
  // must wait for the store-with-update that adjusts the stack pointer to
  // complete.
734   MachineBasicBlock::iterator StackUpdateLoc = MBBI;
735   bool MovingStackUpdateDown = false;
736 
737   // Check if we can move the stack update.
738   if (stackUpdateCanBeMoved(MF)) {
739     const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo();
740     for (CalleeSavedInfo CSI : Info) {
741       // If the callee saved register is spilled to a register instead of the
742       // stack then the spill no longer uses the stack pointer.
743       // This can lead to two consequences:
744       // 1) We no longer need to update the stack because the function does not
745       //    spill any callee saved registers to stack.
746       // 2) We have a situation where we still have to update the stack pointer
747       //    even though some registers are spilled to other registers. In
748       //    this case the current code moves the stack update to an incorrect
749       //    position.
750       // In either case we should abort moving the stack update operation.
751       if (CSI.isSpilledToReg()) {
752         StackUpdateLoc = MBBI;
753         MovingStackUpdateDown = false;
754         break;
755       }
756 
757       int FrIdx = CSI.getFrameIdx();
758       // If the frame index is not negative the callee saved info belongs to a
759       // stack object that is not a fixed stack object. We ignore non-fixed
760       // stack objects because we won't move the stack update pointer past them.
761       if (FrIdx >= 0)
762         continue;
763 
764       if (MFI.isFixedObjectIndex(FrIdx) && MFI.getObjectOffset(FrIdx) < 0) {
765         StackUpdateLoc++;
766         MovingStackUpdateDown = true;
767       } else {
768         // We need all of the Frame Indices to meet these conditions.
769         // If they do not, abort the whole operation.
770         StackUpdateLoc = MBBI;
771         MovingStackUpdateDown = false;
772         break;
773       }
774     }
775 
776     // If the operation was not aborted then update the object offset.
777     if (MovingStackUpdateDown) {
778       for (CalleeSavedInfo CSI : Info) {
779         int FrIdx = CSI.getFrameIdx();
780         if (FrIdx < 0)
781           MFI.setObjectOffset(FrIdx, MFI.getObjectOffset(FrIdx) + NegFrameSize);
782       }
783     }
784   }
785 
786   // Where in the prologue we move the CR fields depends on how many scratch
787   // registers we have, and if we need to save the link register or not. This
788   // lambda is to avoid duplicating the logic in 2 places.
789   auto BuildMoveFromCR = [&]() {
790     if (isELFv2ABI && MustSaveCRs.size() == 1) {
      // In the ELFv2 ABI, we are not required to save all CR fields.
      // If only one CR field is clobbered, it is more efficient to use
      // mfocrf to selectively save just that field, because mfocrf has
      // shorter latency compared to mfcr.
795       assert(isPPC64 && "V2 ABI is 64-bit only.");
796       MachineInstrBuilder MIB =
797           BuildMI(MBB, MBBI, dl, TII.get(PPC::MFOCRF8), TempReg);
798       MIB.addReg(MustSaveCRs[0], RegState::Kill);
799     } else {
800       MachineInstrBuilder MIB =
801           BuildMI(MBB, MBBI, dl, MoveFromCondRegInst, TempReg);
802       for (unsigned CRfield : MustSaveCRs)
803         MIB.addReg(CRfield, RegState::ImplicitKill);
804     }
805   };
806 
807   // If we need to spill the CR and the LR but we don't have two separate
808   // registers available, we must spill them one at a time
809   if (MustSaveCR && SingleScratchReg && MustSaveLR) {
810     BuildMoveFromCR();
811     BuildMI(MBB, MBBI, dl, StoreWordInst)
812         .addReg(TempReg, getKillRegState(true))
813         .addImm(CRSaveOffset)
814         .addReg(SPReg);
815   }
816 
817   if (MustSaveLR)
818     BuildMI(MBB, MBBI, dl, MFLRInst, ScratchReg);
819 
820   if (MustSaveCR && !(SingleScratchReg && MustSaveLR))
821     BuildMoveFromCR();
822 
823   if (HasRedZone) {
824     if (HasFP)
825       BuildMI(MBB, MBBI, dl, StoreInst)
826         .addReg(FPReg)
827         .addImm(FPOffset)
828         .addReg(SPReg);
829     if (FI->usesPICBase())
830       BuildMI(MBB, MBBI, dl, StoreInst)
831         .addReg(PPC::R30)
832         .addImm(PBPOffset)
833         .addReg(SPReg);
834     if (HasBP)
835       BuildMI(MBB, MBBI, dl, StoreInst)
836         .addReg(BPReg)
837         .addImm(BPOffset)
838         .addReg(SPReg);
839   }
840 
841   // Generate the instruction to store the LR. In the case where ROP protection
842   // is required the register holding the LR should not be killed as it will be
843   // used by the hash store instruction.
844   if (MustSaveLR) {
845     BuildMI(MBB, StackUpdateLoc, dl, StoreInst)
846         .addReg(ScratchReg, getKillRegState(!HasROPProtect))
847         .addImm(LROffset)
848         .addReg(SPReg);
849 
850     // Add the ROP protection Hash Store instruction.
851     // NOTE: This is technically a violation of the ABI. The hash can be saved
852     // up to 512 bytes into the Protected Zone. This can be outside of the
853     // initial 288 byte volatile program storage region in the Protected Zone.
854     // However, this restriction will be removed in an upcoming revision of the
855     // ABI.
856     if (HasROPProtect) {
857       const int SaveIndex = FI->getROPProtectionHashSaveIndex();
858       const int64_t ImmOffset = MFI.getObjectOffset(SaveIndex);
859       assert((ImmOffset <= -8 && ImmOffset >= -512) &&
860              "ROP hash save offset out of range.");
861       assert(((ImmOffset & 0x7) == 0) &&
862              "ROP hash save offset must be 8 byte aligned.");
863       BuildMI(MBB, StackUpdateLoc, dl, HashST)
864           .addReg(ScratchReg, getKillRegState(true))
865           .addImm(ImmOffset)
866           .addReg(SPReg);
867     }
868   }
869 
870   if (MustSaveCR &&
871       !(SingleScratchReg && MustSaveLR)) {
872     assert(HasRedZone && "A red zone is always available on PPC64");
873     BuildMI(MBB, MBBI, dl, StoreWordInst)
874       .addReg(TempReg, getKillRegState(true))
875       .addImm(CRSaveOffset)
876       .addReg(SPReg);
877   }
878 
879   // Skip the rest if this is a leaf function & all spills fit in the Red Zone.
880   if (!FrameSize)
881     return;
882 
883   // Adjust stack pointer: r1 += NegFrameSize.
884   // If there is a preferred stack alignment, align R1 now
885 
886   if (HasBP && HasRedZone) {
887     // Save a copy of r1 as the base pointer.
888     BuildMI(MBB, MBBI, dl, OrInst, BPReg)
889       .addReg(SPReg)
890       .addReg(SPReg);
891   }
892 
  // Have we generated a STUX instruction to claim the stack frame? If so,
894   // the negated frame size will be placed in ScratchReg.
895   bool HasSTUX = false;
896 
  // If FrameSize <= TLI.getStackProbeSize(MF), we get a free probe: the POWER
  // ABI requires the back-chain pointer to always be stored at SP, so the
  // mandatory STU(X) instruction touches the newly allocated stack.
900   if (TLI.hasInlineStackProbe(MF) && FrameSize > TLI.getStackProbeSize(MF)) {
901     // To be consistent with other targets, a pseudo instruction is emitted and
902     // will be later expanded in `inlineStackProbe`.
903     BuildMI(MBB, MBBI, dl,
904             TII.get(isPPC64 ? PPC::PROBED_STACKALLOC_64
905                             : PPC::PROBED_STACKALLOC_32))
906         .addDef(TempReg)
907         .addDef(ScratchReg) // ScratchReg stores the old sp.
908         .addImm(NegFrameSize);
    // FIXME: HasSTUX is only read if HasRedZone is not set; in that case we
    // update ScratchReg to satisfy the assumption that it contains
    // NegFrameSize. This solution is rather tricky.
912     if (!HasRedZone) {
913       BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBF), ScratchReg)
914           .addReg(ScratchReg)
915           .addReg(SPReg);
916       HasSTUX = true;
917     }
918   } else {
919     // This condition must be kept in sync with canUseAsPrologue.
920     if (HasBP && MaxAlign > 1) {
921       if (isPPC64)
922         BuildMI(MBB, MBBI, dl, TII.get(PPC::RLDICL), ScratchReg)
923             .addReg(SPReg)
924             .addImm(0)
925             .addImm(64 - Log2(MaxAlign));
926       else // PPC32...
927         BuildMI(MBB, MBBI, dl, TII.get(PPC::RLWINM), ScratchReg)
928             .addReg(SPReg)
929             .addImm(0)
930             .addImm(32 - Log2(MaxAlign))
931             .addImm(31);
932       if (!isLargeFrame) {
933         BuildMI(MBB, MBBI, dl, SubtractImmCarryingInst, ScratchReg)
934             .addReg(ScratchReg, RegState::Kill)
935             .addImm(NegFrameSize);
936       } else {
937         assert(!SingleScratchReg && "Only a single scratch reg available");
938         BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, TempReg)
939             .addImm(NegFrameSize >> 16);
940         BuildMI(MBB, MBBI, dl, OrImmInst, TempReg)
941             .addReg(TempReg, RegState::Kill)
942             .addImm(NegFrameSize & 0xFFFF);
943         BuildMI(MBB, MBBI, dl, SubtractCarryingInst, ScratchReg)
944             .addReg(ScratchReg, RegState::Kill)
945             .addReg(TempReg, RegState::Kill);
946       }
947 
948       BuildMI(MBB, MBBI, dl, StoreUpdtIdxInst, SPReg)
949           .addReg(SPReg, RegState::Kill)
950           .addReg(SPReg)
951           .addReg(ScratchReg);
952       HasSTUX = true;
953 
954     } else if (!isLargeFrame) {
955       BuildMI(MBB, StackUpdateLoc, dl, StoreUpdtInst, SPReg)
956           .addReg(SPReg)
957           .addImm(NegFrameSize)
958           .addReg(SPReg);
959 
960     } else {
961       BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg)
962           .addImm(NegFrameSize >> 16);
963       BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg)
964           .addReg(ScratchReg, RegState::Kill)
965           .addImm(NegFrameSize & 0xFFFF);
966       BuildMI(MBB, MBBI, dl, StoreUpdtIdxInst, SPReg)
967           .addReg(SPReg, RegState::Kill)
968           .addReg(SPReg)
969           .addReg(ScratchReg);
970       HasSTUX = true;
971     }
972   }
973 
974   // Save the TOC register after the stack pointer update if a prologue TOC
975   // save is required for the function.
976   if (MustSaveTOC) {
977     assert(isELFv2ABI && "TOC saves in the prologue only supported on ELFv2");
978     BuildMI(MBB, StackUpdateLoc, dl, TII.get(PPC::STD))
979       .addReg(TOCReg, getKillRegState(true))
980       .addImm(TOCSaveOffset)
981       .addReg(SPReg);
982   }
983 
984   if (!HasRedZone) {
985     assert(!isPPC64 && "A red zone is always available on PPC64");
986     if (HasSTUX) {
987       // The negated frame size is in ScratchReg, and the SPReg has been
988       // decremented by the frame size: SPReg = old SPReg + ScratchReg.
989       // Since FPOffset, PBPOffset, etc. are relative to the beginning of
990       // the stack frame (i.e. the old SP), ideally, we would put the old
991       // SP into a register and use it as the base for the stores. The
992       // problem is that the only available register may be ScratchReg,
993       // which could be R0, and R0 cannot be used as a base address.
994 
995       // First, set ScratchReg to the old SP. This may need to be modified
996       // later.
997       BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBF), ScratchReg)
998         .addReg(ScratchReg, RegState::Kill)
999         .addReg(SPReg);
1000 
1001       if (ScratchReg == PPC::R0) {
1002         // R0 cannot be used as a base register, but it can be used as an
1003         // index in a store-indexed.
1004         int LastOffset = 0;
1005         if (HasFP)  {
1006           // R0 += (FPOffset-LastOffset).
1007           // Need addic, since addi treats R0 as 0.
1008           BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg)
1009             .addReg(ScratchReg)
1010             .addImm(FPOffset-LastOffset);
1011           LastOffset = FPOffset;
1012           // Store FP into *R0.
1013           BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX))
1014             .addReg(FPReg, RegState::Kill)  // Save FP.
1015             .addReg(PPC::ZERO)
1016             .addReg(ScratchReg);  // This will be the index (R0 is ok here).
1017         }
1018         if (FI->usesPICBase()) {
1019           // R0 += (PBPOffset-LastOffset).
1020           BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg)
1021             .addReg(ScratchReg)
1022             .addImm(PBPOffset-LastOffset);
1023           LastOffset = PBPOffset;
1024           BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX))
1025             .addReg(PPC::R30, RegState::Kill)  // Save PIC base pointer.
1026             .addReg(PPC::ZERO)
1027             .addReg(ScratchReg);  // This will be the index (R0 is ok here).
1028         }
1029         if (HasBP) {
1030           // R0 += (BPOffset-LastOffset).
1031           BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg)
1032             .addReg(ScratchReg)
1033             .addImm(BPOffset-LastOffset);
1034           LastOffset = BPOffset;
1035           BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX))
1036             .addReg(BPReg, RegState::Kill)  // Save BP.
1037             .addReg(PPC::ZERO)
1038             .addReg(ScratchReg);  // This will be the index (R0 is ok here).
1039           // BP = R0-LastOffset
1040           BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), BPReg)
1041             .addReg(ScratchReg, RegState::Kill)
1042             .addImm(-LastOffset);
1043         }
1044       } else {
1045         // ScratchReg is not R0, so use it as the base register. It is
1046         // already set to the old SP, so we can use the offsets directly.
1047 
1048         // Now that the stack frame has been allocated, save all the necessary
1049         // registers using ScratchReg as the base address.
1050         if (HasFP)
1051           BuildMI(MBB, MBBI, dl, StoreInst)
1052             .addReg(FPReg)
1053             .addImm(FPOffset)
1054             .addReg(ScratchReg);
1055         if (FI->usesPICBase())
1056           BuildMI(MBB, MBBI, dl, StoreInst)
1057             .addReg(PPC::R30)
1058             .addImm(PBPOffset)
1059             .addReg(ScratchReg);
1060         if (HasBP) {
1061           BuildMI(MBB, MBBI, dl, StoreInst)
1062             .addReg(BPReg)
1063             .addImm(BPOffset)
1064             .addReg(ScratchReg);
1065           BuildMI(MBB, MBBI, dl, OrInst, BPReg)
1066             .addReg(ScratchReg, RegState::Kill)
1067             .addReg(ScratchReg);
1068         }
1069       }
1070     } else {
1071       // The frame size is a known 16-bit constant (fitting in the immediate
1072       // field of STWU). To be here we have to be compiling for PPC32.
1073       // Since the SPReg has been decreased by FrameSize, add it back to each
1074       // offset.
1075       if (HasFP)
1076         BuildMI(MBB, MBBI, dl, StoreInst)
1077           .addReg(FPReg)
1078           .addImm(FrameSize + FPOffset)
1079           .addReg(SPReg);
1080       if (FI->usesPICBase())
1081         BuildMI(MBB, MBBI, dl, StoreInst)
1082           .addReg(PPC::R30)
1083           .addImm(FrameSize + PBPOffset)
1084           .addReg(SPReg);
1085       if (HasBP) {
1086         BuildMI(MBB, MBBI, dl, StoreInst)
1087           .addReg(BPReg)
1088           .addImm(FrameSize + BPOffset)
1089           .addReg(SPReg);
1090         BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), BPReg)
1091           .addReg(SPReg)
1092           .addImm(FrameSize);
1093       }
1094     }
1095   }
1096 
1097   // Add Call Frame Information for the instructions we generated above.
1098   if (needsCFI) {
1099     unsigned CFIIndex;
1100 
1101     if (HasBP) {
1102       // Define CFA in terms of BP. Do this in preference to using FP/SP,
1103       // because if the stack needed aligning then CFA won't be at a fixed
1104       // offset from FP/SP.
1105       unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
1106       CFIIndex = MF.addFrameInst(
1107           MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
1108     } else {
1109       // Adjust the definition of CFA to account for the change in SP.
1110       assert(NegFrameSize);
1111       CFIIndex = MF.addFrameInst(
1112           MCCFIInstruction::cfiDefCfaOffset(nullptr, -NegFrameSize));
1113     }
1114     BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1115         .addCFIIndex(CFIIndex);
1116 
1117     if (HasFP) {
1118       // Describe where FP was saved, at a fixed offset from CFA.
1119       unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
1120       CFIIndex = MF.addFrameInst(
1121           MCCFIInstruction::createOffset(nullptr, Reg, FPOffset));
1122       BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1123           .addCFIIndex(CFIIndex);
1124     }
1125 
1126     if (FI->usesPICBase()) {
      // Describe where the PIC base pointer was saved, at a fixed offset from
      // CFA.
1128       unsigned Reg = MRI->getDwarfRegNum(PPC::R30, true);
1129       CFIIndex = MF.addFrameInst(
1130           MCCFIInstruction::createOffset(nullptr, Reg, PBPOffset));
1131       BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1132           .addCFIIndex(CFIIndex);
1133     }
1134 
1135     if (HasBP) {
1136       // Describe where BP was saved, at a fixed offset from CFA.
1137       unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
1138       CFIIndex = MF.addFrameInst(
1139           MCCFIInstruction::createOffset(nullptr, Reg, BPOffset));
1140       BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1141           .addCFIIndex(CFIIndex);
1142     }
1143 
1144     if (MustSaveLR) {
1145       // Describe where LR was saved, at a fixed offset from CFA.
1146       unsigned Reg = MRI->getDwarfRegNum(LRReg, true);
1147       CFIIndex = MF.addFrameInst(
1148           MCCFIInstruction::createOffset(nullptr, Reg, LROffset));
1149       BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1150           .addCFIIndex(CFIIndex);
1151     }
1152   }
1153 
1154   // If there is a frame pointer, copy R1 into R31
1155   if (HasFP) {
1156     BuildMI(MBB, MBBI, dl, OrInst, FPReg)
1157       .addReg(SPReg)
1158       .addReg(SPReg);
1159 
1160     if (!HasBP && needsCFI) {
1161       // Change the definition of CFA from SP+offset to FP+offset, because SP
1162       // will change at every alloca.
1163       unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
1164       unsigned CFIIndex = MF.addFrameInst(
1165           MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
1166 
1167       BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1168           .addCFIIndex(CFIIndex);
1169     }
1170   }
1171 
1172   if (needsCFI) {
1173     // Describe where callee saved registers were saved, at fixed offsets from
1174     // CFA.
1175     const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
1176     for (const CalleeSavedInfo &I : CSI) {
1177       Register Reg = I.getReg();
1178       if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue;
1179 
1180       // This is a bit of a hack: CR2LT, CR2GT, CR2EQ and CR2UN are just
1181       // subregisters of CR2. We just need to emit a move of CR2.
1182       if (PPC::CRBITRCRegClass.contains(Reg))
1183         continue;
1184 
1185       if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
1186         continue;
1187 
1188       // For SVR4, don't emit a move for the CR spill slot if we haven't
1189       // spilled CRs.
1190       if (isSVR4ABI && (PPC::CR2 <= Reg && Reg <= PPC::CR4)
1191           && !MustSaveCR)
1192         continue;
1193 
1194       // For 64-bit SVR4 when we have spilled CRs, the spill location
1195       // is SP+8, not a frame-relative slot.
1196       if (isSVR4ABI && isPPC64 && (PPC::CR2 <= Reg && Reg <= PPC::CR4)) {
1197         // In the ELFv1 ABI, only CR2 is noted in CFI and stands in for
1198         // the whole CR word.  In the ELFv2 ABI, every CR that was
1199         // actually saved gets its own CFI record.
1200         Register CRReg = isELFv2ABI? Reg : PPC::CR2;
1201         unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
1202             nullptr, MRI->getDwarfRegNum(CRReg, true), CRSaveOffset));
1203         BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1204             .addCFIIndex(CFIIndex);
1205         continue;
1206       }
1207 
1208       if (I.isSpilledToReg()) {
1209         unsigned SpilledReg = I.getDstReg();
1210         unsigned CFIRegister = MF.addFrameInst(MCCFIInstruction::createRegister(
1211             nullptr, MRI->getDwarfRegNum(Reg, true),
1212             MRI->getDwarfRegNum(SpilledReg, true)));
1213         BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1214           .addCFIIndex(CFIRegister);
1215       } else {
1216         int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
1217         // We have changed the object offset above but we do not want to change
1218         // the actual offsets in the CFI instruction so we have to undo the
1219         // offset change here.
1220         if (MovingStackUpdateDown)
1221           Offset -= NegFrameSize;
1222 
1223         unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
1224             nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
1225         BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1226             .addCFIIndex(CFIIndex);
1227       }
1228     }
1229   }
1230 }
1231 
1232 void PPCFrameLowering::inlineStackProbe(MachineFunction &MF,
1233                                         MachineBasicBlock &PrologMBB) const {
1234   bool isPPC64 = Subtarget.isPPC64();
1235   const PPCTargetLowering &TLI = *Subtarget.getTargetLowering();
1236   const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
1237   MachineFrameInfo &MFI = MF.getFrameInfo();
1238   MachineModuleInfo &MMI = MF.getMMI();
1239   const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
1240   // AIX assembler does not support cfi directives.
1241   const bool needsCFI = MF.needsFrameMoves() && !Subtarget.isAIXABI();
1242   auto StackAllocMIPos = llvm::find_if(PrologMBB, [](MachineInstr &MI) {
1243     int Opc = MI.getOpcode();
1244     return Opc == PPC::PROBED_STACKALLOC_64 || Opc == PPC::PROBED_STACKALLOC_32;
1245   });
1246   if (StackAllocMIPos == PrologMBB.end())
1247     return;
1248   const BasicBlock *ProbedBB = PrologMBB.getBasicBlock();
1249   MachineBasicBlock *CurrentMBB = &PrologMBB;
1250   DebugLoc DL = PrologMBB.findDebugLoc(StackAllocMIPos);
1251   MachineInstr &MI = *StackAllocMIPos;
1252   int64_t NegFrameSize = MI.getOperand(2).getImm();
1253   unsigned ProbeSize = TLI.getStackProbeSize(MF);
1254   int64_t NegProbeSize = -(int64_t)ProbeSize;
1255   assert(isInt<32>(NegProbeSize) && "Unhandled probe size");
1256   int64_t NumBlocks = NegFrameSize / NegProbeSize;
1257   int64_t NegResidualSize = NegFrameSize % NegProbeSize;
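  // Illustration (assuming a 4096-byte probe size): a 10000-byte frame gives
  // NumBlocks = -10000 / -4096 = 2 and NegResidualSize = -1808.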
1258   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
1259   Register ScratchReg = MI.getOperand(0).getReg();
1260   Register FPReg = MI.getOperand(1).getReg();
1261   const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
1262   bool HasBP = RegInfo->hasBasePointer(MF);
1263   Register BPReg = RegInfo->getBaseRegister(MF);
1264   Align MaxAlign = MFI.getMaxAlign();
1265   bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI();
1266   const MCInstrDesc &CopyInst = TII.get(isPPC64 ? PPC::OR8 : PPC::OR);
1267   // Subroutines to generate .cfi_* directives.
1268   auto buildDefCFAReg = [&](MachineBasicBlock &MBB,
1269                             MachineBasicBlock::iterator MBBI, Register Reg) {
1270     unsigned RegNum = MRI->getDwarfRegNum(Reg, true);
1271     unsigned CFIIndex = MF.addFrameInst(
1272         MCCFIInstruction::createDefCfaRegister(nullptr, RegNum));
1273     BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
1274         .addCFIIndex(CFIIndex);
1275   };
1276   auto buildDefCFA = [&](MachineBasicBlock &MBB,
1277                          MachineBasicBlock::iterator MBBI, Register Reg,
1278                          int Offset) {
1279     unsigned RegNum = MRI->getDwarfRegNum(Reg, true);
1280     unsigned CFIIndex = MBB.getParent()->addFrameInst(
1281         MCCFIInstruction::cfiDefCfa(nullptr, RegNum, Offset));
1282     BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
1283         .addCFIIndex(CFIIndex);
1284   };
1285   // Subroutine to determine if we can use the Imm as part of d-form.
1286   auto CanUseDForm = [](int64_t Imm) { return isInt<16>(Imm) && Imm % 4 == 0; };
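  // (STDU uses a DS-form displacement: a signed 16-bit value that must be a
  // multiple of 4, which is what this check enforces. STWU only needs a plain
  // D-form displacement, so the check is conservative but safe there.)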
1287   // Subroutine to materialize the Imm into TempReg.
1288   auto MaterializeImm = [&](MachineBasicBlock &MBB,
1289                             MachineBasicBlock::iterator MBBI, int64_t Imm,
1290                             Register &TempReg) {
1291     assert(isInt<32>(Imm) && "Unhandled imm");
1292     if (isInt<16>(Imm))
1293       BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::LI8 : PPC::LI), TempReg)
1294           .addImm(Imm);
1295     else {
1296       BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
1297           .addImm(Imm >> 16);
1298       BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::ORI8 : PPC::ORI), TempReg)
1299           .addReg(TempReg)
1300           .addImm(Imm & 0xFFFF);
1301     }
1302   };
1303   // Subroutine to store frame pointer and decrease stack pointer by probe size.
1304   auto allocateAndProbe = [&](MachineBasicBlock &MBB,
1305                               MachineBasicBlock::iterator MBBI, int64_t NegSize,
1306                               Register NegSizeReg, bool UseDForm,
1307                               Register StoreReg) {
1308     if (UseDForm)
1309       BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::STDU : PPC::STWU), SPReg)
1310           .addReg(StoreReg)
1311           .addImm(NegSize)
1312           .addReg(SPReg);
1313     else
1314       BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
1315           .addReg(StoreReg)
1316           .addReg(SPReg)
1317           .addReg(NegSizeReg);
1318   };
  // Used to probe the stack when realignment is required.
  // Note that, per the ABI's requirement, *sp must always equal the value of
  // the back-chain pointer, so only st(w|d)u(x) can be used to update sp.
  // The following is pseudo code:
1323   // final_sp = (sp & align) + negframesize;
1324   // neg_gap = final_sp - sp;
1325   // while (neg_gap < negprobesize) {
1326   //   stdu fp, negprobesize(sp);
1327   //   neg_gap -= negprobesize;
1328   // }
1329   // stdux fp, sp, neg_gap
1330   //
  // When HasBP && HasRedzone, the back-chain pointer is already saved in BPReg
  // before the probe code, so we don't need to save it again. This frees up
  // one additional register that can be used to materialize the probe size
  // when the x-form is needed. Otherwise, we can NOT materialize the probe
  // size, so only the d-form can be used for now.
1336   //
  // The register allocations are:
  // if (HasBP && HasRedzone) {
  //   r0: materialize the probe size if needed so that we can use the x-form.
  //   r12: `neg_gap`
  // } else {
  //   r0: back-chain pointer
  //   r12: `neg_gap`
  // }
1345   auto probeRealignedStack = [&](MachineBasicBlock &MBB,
1346                                  MachineBasicBlock::iterator MBBI,
1347                                  Register ScratchReg, Register TempReg) {
1348     assert(HasBP && "The function is supposed to have base pointer when its "
1349                     "stack is realigned.");
1350     assert(isPowerOf2_64(ProbeSize) && "Probe size should be power of 2");
1351 
    // FIXME: We can eliminate this limitation if we get more information about
    // which parts of the red zone are already used. A used part of the red
    // zone can be treated as probed, but there might be `holes' in the probed
    // red zone, which could complicate the implementation.
1356     assert(ProbeSize >= Subtarget.getRedZoneSize() &&
1357            "Probe size should be larger or equal to the size of red-zone so "
1358            "that red-zone is not clobbered by probing.");
1359 
1360     Register &FinalStackPtr = TempReg;
1361     // FIXME: We only support NegProbeSize materializable by DForm currently.
1362     // When HasBP && HasRedzone, we can use xform if we have an additional idle
1363     // register.
1364     NegProbeSize = std::max(NegProbeSize, -((int64_t)1 << 15));
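    // -((int64_t)1 << 15) == -32768 is the most negative displacement that
    // fits in a signed 16-bit field, so larger requested probe sizes are
    // probed in 32 KB steps instead.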
1365     assert(isInt<16>(NegProbeSize) &&
1366            "NegProbeSize should be materializable by DForm");
1367     Register CRReg = PPC::CR0;
    // The emitted assembly is laid out roughly like:
1369     // bb.0:
1370     //   ...
1371     //   sub $scratchreg, $finalsp, r1
1372     //   cmpdi $scratchreg, <negprobesize>
1373     //   bge bb.2
1374     // bb.1:
1375     //   stdu <backchain>, <negprobesize>(r1)
1376     //   sub $scratchreg, $scratchreg, negprobesize
1377     //   cmpdi $scratchreg, <negprobesize>
1378     //   blt bb.1
1379     // bb.2:
1380     //   stdux <backchain>, r1, $scratchreg
1381     MachineFunction::iterator MBBInsertPoint = std::next(MBB.getIterator());
1382     MachineBasicBlock *ProbeLoopBodyMBB = MF.CreateMachineBasicBlock(ProbedBB);
1383     MF.insert(MBBInsertPoint, ProbeLoopBodyMBB);
1384     MachineBasicBlock *ProbeExitMBB = MF.CreateMachineBasicBlock(ProbedBB);
1385     MF.insert(MBBInsertPoint, ProbeExitMBB);
1386     // bb.2
1387     {
1388       Register BackChainPointer = HasRedZone ? BPReg : TempReg;
1389       allocateAndProbe(*ProbeExitMBB, ProbeExitMBB->end(), 0, ScratchReg, false,
1390                        BackChainPointer);
1391       if (HasRedZone)
        // PROBED_STACKALLOC_64 assumes Operand(1) holds the old SP, so copy
        // BPReg to TempReg to satisfy it.
1394         BuildMI(*ProbeExitMBB, ProbeExitMBB->end(), DL, CopyInst, TempReg)
1395             .addReg(BPReg)
1396             .addReg(BPReg);
1397       ProbeExitMBB->splice(ProbeExitMBB->end(), &MBB, MBBI, MBB.end());
1398       ProbeExitMBB->transferSuccessorsAndUpdatePHIs(&MBB);
1399     }
1400     // bb.0
1401     {
1402       BuildMI(&MBB, DL, TII.get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), ScratchReg)
1403           .addReg(SPReg)
1404           .addReg(FinalStackPtr);
1405       if (!HasRedZone)
1406         BuildMI(&MBB, DL, CopyInst, TempReg).addReg(SPReg).addReg(SPReg);
1407       BuildMI(&MBB, DL, TII.get(isPPC64 ? PPC::CMPDI : PPC::CMPWI), CRReg)
1408           .addReg(ScratchReg)
1409           .addImm(NegProbeSize);
1410       BuildMI(&MBB, DL, TII.get(PPC::BCC))
1411           .addImm(PPC::PRED_GE)
1412           .addReg(CRReg)
1413           .addMBB(ProbeExitMBB);
1414       MBB.addSuccessor(ProbeLoopBodyMBB);
1415       MBB.addSuccessor(ProbeExitMBB);
1416     }
1417     // bb.1
1418     {
1419       Register BackChainPointer = HasRedZone ? BPReg : TempReg;
1420       allocateAndProbe(*ProbeLoopBodyMBB, ProbeLoopBodyMBB->end(), NegProbeSize,
1421                        0, true /*UseDForm*/, BackChainPointer);
1422       BuildMI(ProbeLoopBodyMBB, DL, TII.get(isPPC64 ? PPC::ADDI8 : PPC::ADDI),
1423               ScratchReg)
1424           .addReg(ScratchReg)
1425           .addImm(-NegProbeSize);
1426       BuildMI(ProbeLoopBodyMBB, DL, TII.get(isPPC64 ? PPC::CMPDI : PPC::CMPWI),
1427               CRReg)
1428           .addReg(ScratchReg)
1429           .addImm(NegProbeSize);
1430       BuildMI(ProbeLoopBodyMBB, DL, TII.get(PPC::BCC))
1431           .addImm(PPC::PRED_LT)
1432           .addReg(CRReg)
1433           .addMBB(ProbeLoopBodyMBB);
1434       ProbeLoopBodyMBB->addSuccessor(ProbeExitMBB);
1435       ProbeLoopBodyMBB->addSuccessor(ProbeLoopBodyMBB);
1436     }
1437     // Update liveins.
1438     recomputeLiveIns(*ProbeLoopBodyMBB);
1439     recomputeLiveIns(*ProbeExitMBB);
1440     return ProbeExitMBB;
1441   };
  // For the case HasBP && MaxAlign > 1, we have to realign the SP by
  // performing SP = SP - SP % MaxAlign, which makes the probing more like a
  // dynamic probe since the offset subtracted from SP is determined by SP's
  // runtime value.
1445   if (HasBP && MaxAlign > 1) {
1446     // Calculate final stack pointer.
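    // ScratchReg = SP & (MaxAlign - 1), and then
    // FPReg = (SP - ScratchReg) + NegFrameSize, the realigned final SP.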
1447     if (isPPC64)
1448       BuildMI(*CurrentMBB, {MI}, DL, TII.get(PPC::RLDICL), ScratchReg)
1449           .addReg(SPReg)
1450           .addImm(0)
1451           .addImm(64 - Log2(MaxAlign));
1452     else
1453       BuildMI(*CurrentMBB, {MI}, DL, TII.get(PPC::RLWINM), ScratchReg)
1454           .addReg(SPReg)
1455           .addImm(0)
1456           .addImm(32 - Log2(MaxAlign))
1457           .addImm(31);
1458     BuildMI(*CurrentMBB, {MI}, DL, TII.get(isPPC64 ? PPC::SUBF8 : PPC::SUBF),
1459             FPReg)
1460         .addReg(ScratchReg)
1461         .addReg(SPReg);
1462     MaterializeImm(*CurrentMBB, {MI}, NegFrameSize, ScratchReg);
1463     BuildMI(*CurrentMBB, {MI}, DL, TII.get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
1464             FPReg)
1465         .addReg(ScratchReg)
1466         .addReg(FPReg);
1467     CurrentMBB = probeRealignedStack(*CurrentMBB, {MI}, ScratchReg, FPReg);
1468     if (needsCFI)
1469       buildDefCFAReg(*CurrentMBB, {MI}, FPReg);
1470   } else {
1471     // Initialize current frame pointer.
1472     BuildMI(*CurrentMBB, {MI}, DL, CopyInst, FPReg).addReg(SPReg).addReg(SPReg);
1473     // Use FPReg to calculate CFA.
1474     if (needsCFI)
1475       buildDefCFA(*CurrentMBB, {MI}, FPReg, 0);
1476     // Probe residual part.
1477     if (NegResidualSize) {
1478       bool ResidualUseDForm = CanUseDForm(NegResidualSize);
1479       if (!ResidualUseDForm)
1480         MaterializeImm(*CurrentMBB, {MI}, NegResidualSize, ScratchReg);
1481       allocateAndProbe(*CurrentMBB, {MI}, NegResidualSize, ScratchReg,
1482                        ResidualUseDForm, FPReg);
1483     }
1484     bool UseDForm = CanUseDForm(NegProbeSize);
1485     // If number of blocks is small, just probe them directly.
1486     if (NumBlocks < 3) {
1487       if (!UseDForm)
1488         MaterializeImm(*CurrentMBB, {MI}, NegProbeSize, ScratchReg);
1489       for (int i = 0; i < NumBlocks; ++i)
1490         allocateAndProbe(*CurrentMBB, {MI}, NegProbeSize, ScratchReg, UseDForm,
1491                          FPReg);
1492       if (needsCFI) {
1493         // Restore using SPReg to calculate CFA.
1494         buildDefCFAReg(*CurrentMBB, {MI}, SPReg);
1495       }
1496     } else {
      // Since CTR is a volatile register and the current shrink-wrapping
      // implementation won't choose an MBB inside a loop as the PrologMBB,
      // it's safe to synthesize a CTR loop to probe.
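      // The synthesized code looks roughly like:
      //   li    scratch, NumBlocks
      //   mtctr scratch
      //   [li   scratch, negprobesize]  ; only if the x-form is needed
      // Loop:
      //   stdu  fp, negprobesize(r1)    ; or stdux fp, r1, scratch
      //   bdnz  Loop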
      // Calculate the trip count and store it in the CTR register.
1501       MaterializeImm(*CurrentMBB, {MI}, NumBlocks, ScratchReg);
1502       BuildMI(*CurrentMBB, {MI}, DL, TII.get(isPPC64 ? PPC::MTCTR8 : PPC::MTCTR))
1503           .addReg(ScratchReg, RegState::Kill);
1504       if (!UseDForm)
1505         MaterializeImm(*CurrentMBB, {MI}, NegProbeSize, ScratchReg);
1506       // Create MBBs of the loop.
1507       MachineFunction::iterator MBBInsertPoint =
1508           std::next(CurrentMBB->getIterator());
1509       MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(ProbedBB);
1510       MF.insert(MBBInsertPoint, LoopMBB);
1511       MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(ProbedBB);
1512       MF.insert(MBBInsertPoint, ExitMBB);
1513       // Synthesize the loop body.
1514       allocateAndProbe(*LoopMBB, LoopMBB->end(), NegProbeSize, ScratchReg,
1515                        UseDForm, FPReg);
1516       BuildMI(LoopMBB, DL, TII.get(isPPC64 ? PPC::BDNZ8 : PPC::BDNZ))
1517           .addMBB(LoopMBB);
1518       LoopMBB->addSuccessor(ExitMBB);
1519       LoopMBB->addSuccessor(LoopMBB);
1520       // Synthesize the exit MBB.
1521       ExitMBB->splice(ExitMBB->end(), CurrentMBB,
1522                       std::next(MachineBasicBlock::iterator(MI)),
1523                       CurrentMBB->end());
1524       ExitMBB->transferSuccessorsAndUpdatePHIs(CurrentMBB);
1525       CurrentMBB->addSuccessor(LoopMBB);
1526       if (needsCFI) {
1527         // Restore using SPReg to calculate CFA.
1528         buildDefCFAReg(*ExitMBB, ExitMBB->begin(), SPReg);
1529       }
1530       // Update liveins.
1531       recomputeLiveIns(*LoopMBB);
1532       recomputeLiveIns(*ExitMBB);
1533     }
1534   }
1535   ++NumPrologProbed;
1536   MI.eraseFromParent();
1537 }
1538 
1539 void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
1540                                     MachineBasicBlock &MBB) const {
1541   MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
1542   DebugLoc dl;
1543 
1544   if (MBBI != MBB.end())
1545     dl = MBBI->getDebugLoc();
1546 
1547   const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
1548   const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
1549 
1550   // Get alignment info so we know how to restore the SP.
1551   const MachineFrameInfo &MFI = MF.getFrameInfo();
1552 
1553   // Get the number of bytes allocated from the FrameInfo.
1554   int64_t FrameSize = MFI.getStackSize();
1555 
1556   // Get processor type.
1557   bool isPPC64 = Subtarget.isPPC64();
1558 
1559   // Check if the link register (LR) has been saved.
1560   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1561   bool MustSaveLR = FI->mustSaveLR();
1562   const SmallVectorImpl<Register> &MustSaveCRs = FI->getMustSaveCRs();
1563   bool MustSaveCR = !MustSaveCRs.empty();
1564   // Do we have a frame pointer and/or base pointer for this function?
1565   bool HasFP = hasFP(MF);
1566   bool HasBP = RegInfo->hasBasePointer(MF);
1567   bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI();
1568   bool HasROPProtect = Subtarget.hasROPProtect();
1569   bool HasPrivileged = Subtarget.hasPrivileged();
1570 
1571   Register SPReg      = isPPC64 ? PPC::X1  : PPC::R1;
1572   Register BPReg = RegInfo->getBaseRegister(MF);
1573   Register FPReg      = isPPC64 ? PPC::X31 : PPC::R31;
1574   Register ScratchReg;
1575   Register TempReg     = isPPC64 ? PPC::X12 : PPC::R12; // another scratch reg
1576   const MCInstrDesc& MTLRInst = TII.get( isPPC64 ? PPC::MTLR8
1577                                                  : PPC::MTLR );
1578   const MCInstrDesc& LoadInst = TII.get( isPPC64 ? PPC::LD
1579                                                  : PPC::LWZ );
1580   const MCInstrDesc& LoadImmShiftedInst = TII.get( isPPC64 ? PPC::LIS8
1581                                                            : PPC::LIS );
1582   const MCInstrDesc& OrInst = TII.get(isPPC64 ? PPC::OR8
1583                                               : PPC::OR );
1584   const MCInstrDesc& OrImmInst = TII.get( isPPC64 ? PPC::ORI8
1585                                                   : PPC::ORI );
1586   const MCInstrDesc& AddImmInst = TII.get( isPPC64 ? PPC::ADDI8
1587                                                    : PPC::ADDI );
1588   const MCInstrDesc& AddInst = TII.get( isPPC64 ? PPC::ADD8
1589                                                 : PPC::ADD4 );
1590   const MCInstrDesc& LoadWordInst = TII.get( isPPC64 ? PPC::LWZ8
1591                                                      : PPC::LWZ);
1592   const MCInstrDesc& MoveToCRInst = TII.get( isPPC64 ? PPC::MTOCRF8
1593                                                      : PPC::MTOCRF);
1594   const MCInstrDesc &HashChk =
1595       TII.get(isPPC64 ? (HasPrivileged ? PPC::HASHCHKP8 : PPC::HASHCHK8)
1596                       : (HasPrivileged ? PPC::HASHCHKP : PPC::HASHCHK));
1597   int64_t LROffset = getReturnSaveOffset();
1598 
1599   int64_t FPOffset = 0;
1600 
  // Use the same bool variable that is reassigned below so that the result of
  // findScratchRegister does not become an unused variable (and a compiler
  // warning) when asserts are compiled out.
1602   bool SingleScratchReg = findScratchRegister(&MBB, true, false, &ScratchReg,
1603                                               &TempReg);
1604   assert(SingleScratchReg &&
1605          "Could not find an available scratch register");
1606 
1607   SingleScratchReg = ScratchReg == TempReg;
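  // From here on, SingleScratchReg is true iff only one distinct scratch
  // register is available.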
1608 
1609   if (HasFP) {
1610     int FPIndex = FI->getFramePointerSaveIndex();
1611     assert(FPIndex && "No Frame Pointer Save Slot!");
1612     FPOffset = MFI.getObjectOffset(FPIndex);
1613   }
1614 
1615   int64_t BPOffset = 0;
1616   if (HasBP) {
1617       int BPIndex = FI->getBasePointerSaveIndex();
1618       assert(BPIndex && "No Base Pointer Save Slot!");
1619       BPOffset = MFI.getObjectOffset(BPIndex);
1620   }
1621 
1622   int64_t PBPOffset = 0;
1623   if (FI->usesPICBase()) {
1624     int PBPIndex = FI->getPICBasePointerSaveIndex();
1625     assert(PBPIndex && "No PIC Base Pointer Save Slot!");
1626     PBPOffset = MFI.getObjectOffset(PBPIndex);
1627   }
1628 
1629   bool IsReturnBlock = (MBBI != MBB.end() && MBBI->isReturn());
1630 
1631   if (IsReturnBlock) {
1632     unsigned RetOpcode = MBBI->getOpcode();
1633     bool UsesTCRet =  RetOpcode == PPC::TCRETURNri ||
1634                       RetOpcode == PPC::TCRETURNdi ||
1635                       RetOpcode == PPC::TCRETURNai ||
1636                       RetOpcode == PPC::TCRETURNri8 ||
1637                       RetOpcode == PPC::TCRETURNdi8 ||
1638                       RetOpcode == PPC::TCRETURNai8;
1639 
1640     if (UsesTCRet) {
1641       int MaxTCRetDelta = FI->getTailCallSPDelta();
1642       MachineOperand &StackAdjust = MBBI->getOperand(1);
1643       assert(StackAdjust.isImm() && "Expecting immediate value.");
1644       // Adjust stack pointer.
1645       int StackAdj = StackAdjust.getImm();
1646       int Delta = StackAdj - MaxTCRetDelta;
1647       assert((Delta >= 0) && "Delta must be positive");
      if (MaxTCRetDelta > 0)
        FrameSize += (StackAdj + Delta);
1650       else
1651         FrameSize += StackAdj;
1652     }
1653   }
1654 
1655   // Frames of 32KB & larger require special handling because they cannot be
1656   // indexed into with a simple LD/LWZ immediate offset operand.
1657   bool isLargeFrame = !isInt<16>(FrameSize);
1658 
1659   // On targets without red zone, the SP needs to be restored last, so that
1660   // all live contents of the stack frame are upwards of the SP. This means
1661   // that we cannot restore SP just now, since there may be more registers
1662   // to restore from the stack frame (e.g. R31). If the frame size is not
1663   // a simple immediate value, we will need a spare register to hold the
1664   // restored SP. If the frame size is known and small, we can simply adjust
1665   // the offsets of the registers to be restored, and still use SP to restore
1666   // them. In such case, the final update of SP will be to add the frame
1667   // size to it.
1668   // To simplify the code, set RBReg to the base register used to restore
1669   // values from the stack, and set SPAdd to the value that needs to be added
1670   // to the SP at the end. The default values are as if red zone was present.
1671   unsigned RBReg = SPReg;
1672   unsigned SPAdd = 0;
1673 
1674   // Check if we can move the stack update instruction up the epilogue
1675   // past the callee saves. This will allow the move to LR instruction
  // to be executed before the restores of the callee saves, which means that
  // the callee-save restores can hide the latency of the MTLR instruction.
1678   MachineBasicBlock::iterator StackUpdateLoc = MBBI;
1679   if (stackUpdateCanBeMoved(MF)) {
1680     const std::vector<CalleeSavedInfo> & Info = MFI.getCalleeSavedInfo();
1681     for (CalleeSavedInfo CSI : Info) {
1682       // If the callee saved register is spilled to another register abort the
1683       // stack update movement.
1684       if (CSI.isSpilledToReg()) {
1685         StackUpdateLoc = MBBI;
1686         break;
1687       }
1688       int FrIdx = CSI.getFrameIdx();
      // If the frame index is not negative, the callee saved info belongs to a
1690       // stack object that is not a fixed stack object. We ignore non-fixed
1691       // stack objects because we won't move the update of the stack pointer
1692       // past them.
1693       if (FrIdx >= 0)
1694         continue;
1695 
1696       if (MFI.isFixedObjectIndex(FrIdx) && MFI.getObjectOffset(FrIdx) < 0)
1697         StackUpdateLoc--;
1698       else {
1699         // Abort the operation as we can't update all CSR restores.
1700         StackUpdateLoc = MBBI;
1701         break;
1702       }
1703     }
1704   }
1705 
1706   if (FrameSize) {
1707     // In the prologue, the loaded (or persistent) stack pointer value is
    // offset by the STDU/STDUX/STWU/STWUX instruction. For targets with a red
    // zone, add this offset back now.
1710 
1711     // If the function has a base pointer, the stack pointer has been copied
1712     // to it so we can restore it by copying in the other direction.
1713     if (HasRedZone && HasBP) {
1714       BuildMI(MBB, MBBI, dl, OrInst, RBReg).
1715         addReg(BPReg).
1716         addReg(BPReg);
1717     }
1718     // If this function contained a fastcc call and GuaranteedTailCallOpt is
1719     // enabled (=> hasFastCall()==true) the fastcc call might contain a tail
1720     // call which invalidates the stack pointer value in SP(0). So we use the
    // value of R31 in this case. A similar situation exists with setjmp.
1722     else if (FI->hasFastCall() || MF.exposesReturnsTwice()) {
1723       assert(HasFP && "Expecting a valid frame pointer.");
1724       if (!HasRedZone)
1725         RBReg = FPReg;
1726       if (!isLargeFrame) {
1727         BuildMI(MBB, MBBI, dl, AddImmInst, RBReg)
1728           .addReg(FPReg).addImm(FrameSize);
1729       } else {
1730         BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg)
1731           .addImm(FrameSize >> 16);
1732         BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg)
1733           .addReg(ScratchReg, RegState::Kill)
1734           .addImm(FrameSize & 0xFFFF);
1735         BuildMI(MBB, MBBI, dl, AddInst)
1736           .addReg(RBReg)
1737           .addReg(FPReg)
1738           .addReg(ScratchReg);
1739       }
1740     } else if (!isLargeFrame && !HasBP && !MFI.hasVarSizedObjects()) {
1741       if (HasRedZone) {
1742         BuildMI(MBB, StackUpdateLoc, dl, AddImmInst, SPReg)
1743           .addReg(SPReg)
1744           .addImm(FrameSize);
1745       } else {
1746         // Make sure that adding FrameSize will not overflow the max offset
1747         // size.
1748         assert(FPOffset <= 0 && BPOffset <= 0 && PBPOffset <= 0 &&
1749                "Local offsets should be negative");
1750         SPAdd = FrameSize;
1751         FPOffset += FrameSize;
1752         BPOffset += FrameSize;
1753         PBPOffset += FrameSize;
1754       }
1755     } else {
1756       // We don't want to use ScratchReg as a base register, because it
1757       // could happen to be R0. Use FP instead, but make sure to preserve it.
1758       if (!HasRedZone) {
1759         // If FP is not saved, copy it to ScratchReg.
1760         if (!HasFP)
1761           BuildMI(MBB, MBBI, dl, OrInst, ScratchReg)
1762             .addReg(FPReg)
1763             .addReg(FPReg);
1764         RBReg = FPReg;
1765       }
1766       BuildMI(MBB, StackUpdateLoc, dl, LoadInst, RBReg)
1767         .addImm(0)
1768         .addReg(SPReg);
1769     }
1770   }
1771   assert(RBReg != ScratchReg && "Should have avoided ScratchReg");
1772   // If there is no red zone, ScratchReg may be needed for holding a useful
1773   // value (although not the base register). Make sure it is not overwritten
1774   // too early.
1775 
1776   // If we need to restore both the LR and the CR and we only have one
1777   // available scratch register, we must do them one at a time.
1778   if (MustSaveCR && SingleScratchReg && MustSaveLR) {
1779     // Here TempReg == ScratchReg, and in the absence of red zone ScratchReg
1780     // is live here.
1781     assert(HasRedZone && "Expecting red zone");
1782     BuildMI(MBB, MBBI, dl, LoadWordInst, TempReg)
1783       .addImm(CRSaveOffset)
1784       .addReg(SPReg);
1785     for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
1786       BuildMI(MBB, MBBI, dl, MoveToCRInst, MustSaveCRs[i])
1787         .addReg(TempReg, getKillRegState(i == e-1));
1788   }
1789 
1790   // Delay restoring of the LR if ScratchReg is needed. This is ok, since
1791   // LR is stored in the caller's stack frame. ScratchReg will be needed
1792   // if RBReg is anything other than SP. We shouldn't use ScratchReg as
1793   // a base register anyway, because it may happen to be R0.
1794   bool LoadedLR = false;
1795   if (MustSaveLR && RBReg == SPReg && isInt<16>(LROffset+SPAdd)) {
1796     BuildMI(MBB, StackUpdateLoc, dl, LoadInst, ScratchReg)
1797       .addImm(LROffset+SPAdd)
1798       .addReg(RBReg);
1799     LoadedLR = true;
1800   }
1801 
1802   if (MustSaveCR && !(SingleScratchReg && MustSaveLR)) {
1803     assert(RBReg == SPReg && "Should be using SP as a base register");
1804     BuildMI(MBB, MBBI, dl, LoadWordInst, TempReg)
1805       .addImm(CRSaveOffset)
1806       .addReg(RBReg);
1807   }
1808 
1809   if (HasFP) {
1810     // If there is red zone, restore FP directly, since SP has already been
1811     // restored. Otherwise, restore the value of FP into ScratchReg.
1812     if (HasRedZone || RBReg == SPReg)
1813       BuildMI(MBB, MBBI, dl, LoadInst, FPReg)
1814         .addImm(FPOffset)
1815         .addReg(SPReg);
1816     else
1817       BuildMI(MBB, MBBI, dl, LoadInst, ScratchReg)
1818         .addImm(FPOffset)
1819         .addReg(RBReg);
1820   }
1821 
1822   if (FI->usesPICBase())
1823     BuildMI(MBB, MBBI, dl, LoadInst, PPC::R30)
1824       .addImm(PBPOffset)
1825       .addReg(RBReg);
1826 
1827   if (HasBP)
1828     BuildMI(MBB, MBBI, dl, LoadInst, BPReg)
1829       .addImm(BPOffset)
1830       .addReg(RBReg);
1831 
1832   // There is nothing more to be loaded from the stack, so now we can
1833   // restore SP: SP = RBReg + SPAdd.
1834   if (RBReg != SPReg || SPAdd != 0) {
1835     assert(!HasRedZone && "This should not happen with red zone");
1836     // If SPAdd is 0, generate a copy.
1837     if (SPAdd == 0)
1838       BuildMI(MBB, MBBI, dl, OrInst, SPReg)
1839         .addReg(RBReg)
1840         .addReg(RBReg);
1841     else
1842       BuildMI(MBB, MBBI, dl, AddImmInst, SPReg)
1843         .addReg(RBReg)
1844         .addImm(SPAdd);
1845 
1846     assert(RBReg != ScratchReg && "Should be using FP or SP as base register");
1847     if (RBReg == FPReg)
1848       BuildMI(MBB, MBBI, dl, OrInst, FPReg)
1849         .addReg(ScratchReg)
1850         .addReg(ScratchReg);
1851 
1852     // Now load the LR from the caller's stack frame.
1853     if (MustSaveLR && !LoadedLR)
1854       BuildMI(MBB, MBBI, dl, LoadInst, ScratchReg)
1855         .addImm(LROffset)
1856         .addReg(SPReg);
1857   }
1858 
1859   if (MustSaveCR &&
1860       !(SingleScratchReg && MustSaveLR))
1861     for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
1862       BuildMI(MBB, MBBI, dl, MoveToCRInst, MustSaveCRs[i])
1863         .addReg(TempReg, getKillRegState(i == e-1));
1864 
1865   if (MustSaveLR) {
1866     // If ROP protection is required, an extra instruction is added to compute a
1867     // hash and then compare it to the hash stored in the prologue.
1868     if (HasROPProtect) {
1869       const int SaveIndex = FI->getROPProtectionHashSaveIndex();
1870       const int64_t ImmOffset = MFI.getObjectOffset(SaveIndex);
1871       assert((ImmOffset <= -8 && ImmOffset >= -512) &&
1872              "ROP hash check location offset out of range.");
1873       assert(((ImmOffset & 0x7) == 0) &&
1874              "ROP hash check location offset must be 8 byte aligned.");
1875       BuildMI(MBB, StackUpdateLoc, dl, HashChk)
1876           .addReg(ScratchReg)
1877           .addImm(ImmOffset)
1878           .addReg(SPReg);
1879     }
1880     BuildMI(MBB, StackUpdateLoc, dl, MTLRInst).addReg(ScratchReg);
1881   }
1882 
  // Callee pop calling convention. Pop parameter/linkage area. Used for tail
  // call optimization.
1885   if (IsReturnBlock) {
1886     unsigned RetOpcode = MBBI->getOpcode();
1887     if (MF.getTarget().Options.GuaranteedTailCallOpt &&
1888         (RetOpcode == PPC::BLR || RetOpcode == PPC::BLR8) &&
1889         MF.getFunction().getCallingConv() == CallingConv::Fast) {
1890       PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1891       unsigned CallerAllocatedAmt = FI->getMinReservedArea();
1892 
1893       if (CallerAllocatedAmt && isInt<16>(CallerAllocatedAmt)) {
1894         BuildMI(MBB, MBBI, dl, AddImmInst, SPReg)
1895           .addReg(SPReg).addImm(CallerAllocatedAmt);
1896       } else {
1897         BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg)
1898           .addImm(CallerAllocatedAmt >> 16);
1899         BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg)
1900           .addReg(ScratchReg, RegState::Kill)
1901           .addImm(CallerAllocatedAmt & 0xFFFF);
1902         BuildMI(MBB, MBBI, dl, AddInst)
1903           .addReg(SPReg)
1904           .addReg(FPReg)
1905           .addReg(ScratchReg);
1906       }
1907     } else {
1908       createTailCallBranchInstr(MBB);
1909     }
1910   }
1911 }
1912 
1913 void PPCFrameLowering::createTailCallBranchInstr(MachineBasicBlock &MBB) const {
1914   MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
1915 
1916   // If we got this far a first terminator should exist.
1917   assert(MBBI != MBB.end() && "Failed to find the first terminator.");
1918 
1919   DebugLoc dl = MBBI->getDebugLoc();
1920   const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
1921 
1922   // Create branch instruction for pseudo tail call return instruction.
1923   // The TCRETURNdi variants are direct calls. Valid targets for those are
1924   // MO_GlobalAddress operands as well as MO_ExternalSymbol with PC-Rel
1925   // since we can tail call external functions with PC-Rel (i.e. we don't need
1926   // to worry about different TOC pointers). Some of the external functions will
1927   // be MO_GlobalAddress while others like memcpy for example, are going to
1928   // be MO_ExternalSymbol.
1929   unsigned RetOpcode = MBBI->getOpcode();
1930   if (RetOpcode == PPC::TCRETURNdi) {
1931     MBBI = MBB.getLastNonDebugInstr();
1932     MachineOperand &JumpTarget = MBBI->getOperand(0);
1933     if (JumpTarget.isGlobal())
1934       BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB)).
1935         addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
1936     else if (JumpTarget.isSymbol())
1937       BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB)).
1938         addExternalSymbol(JumpTarget.getSymbolName());
1939     else
1940       llvm_unreachable("Expecting Global or External Symbol");
1941   } else if (RetOpcode == PPC::TCRETURNri) {
1942     MBBI = MBB.getLastNonDebugInstr();
1943     assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
1944     BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR));
1945   } else if (RetOpcode == PPC::TCRETURNai) {
1946     MBBI = MBB.getLastNonDebugInstr();
1947     MachineOperand &JumpTarget = MBBI->getOperand(0);
1948     BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA)).addImm(JumpTarget.getImm());
1949   } else if (RetOpcode == PPC::TCRETURNdi8) {
1950     MBBI = MBB.getLastNonDebugInstr();
1951     MachineOperand &JumpTarget = MBBI->getOperand(0);
1952     if (JumpTarget.isGlobal())
1953       BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB8)).
1954         addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
1955     else if (JumpTarget.isSymbol())
1956       BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB8)).
1957         addExternalSymbol(JumpTarget.getSymbolName());
1958     else
1959       llvm_unreachable("Expecting Global or External Symbol");
1960   } else if (RetOpcode == PPC::TCRETURNri8) {
1961     MBBI = MBB.getLastNonDebugInstr();
1962     assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
1963     BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR8));
1964   } else if (RetOpcode == PPC::TCRETURNai8) {
1965     MBBI = MBB.getLastNonDebugInstr();
1966     MachineOperand &JumpTarget = MBBI->getOperand(0);
1967     BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA8)).addImm(JumpTarget.getImm());
1968   }
1969 }
1970 
1971 void PPCFrameLowering::determineCalleeSaves(MachineFunction &MF,
1972                                             BitVector &SavedRegs,
1973                                             RegScavenger *RS) const {
1974   TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1975 
1976   const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
1977 
1978   // Do not explicitly save the callee saved VSRp registers.
1979   // The individual VSR subregisters will be saved instead.
1980   SavedRegs.reset(PPC::VSRp26);
1981   SavedRegs.reset(PPC::VSRp27);
1982   SavedRegs.reset(PPC::VSRp28);
1983   SavedRegs.reset(PPC::VSRp29);
1984   SavedRegs.reset(PPC::VSRp30);
1985   SavedRegs.reset(PPC::VSRp31);
1986 
1987   //  Save and clear the LR state.
1988   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1989   unsigned LR = RegInfo->getRARegister();
1990   FI->setMustSaveLR(MustSaveLR(MF, LR));
1991   SavedRegs.reset(LR);
1992 
1993   //  Save R31 if necessary
1994   int FPSI = FI->getFramePointerSaveIndex();
1995   const bool isPPC64 = Subtarget.isPPC64();
1996   MachineFrameInfo &MFI = MF.getFrameInfo();
1997 
1998   // If the frame pointer save index hasn't been defined yet.
1999   if (!FPSI && needsFP(MF)) {
    // Find out the fixed offset of the frame pointer save area.
2001     int FPOffset = getFramePointerSaveOffset();
2002     // Allocate the frame index for frame pointer save area.
2003     FPSI = MFI.CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
2004     // Save the result.
2005     FI->setFramePointerSaveIndex(FPSI);
2006   }
2007 
2008   int BPSI = FI->getBasePointerSaveIndex();
2009   if (!BPSI && RegInfo->hasBasePointer(MF)) {
2010     int BPOffset = getBasePointerSaveOffset();
2011     // Allocate the frame index for the base pointer save area.
2012     BPSI = MFI.CreateFixedObject(isPPC64? 8 : 4, BPOffset, true);
2013     // Save the result.
2014     FI->setBasePointerSaveIndex(BPSI);
2015   }
2016 
2017   // Reserve stack space for the PIC Base register (R30).
2018   // Only used in SVR4 32-bit.
2019   if (FI->usesPICBase()) {
2020     int PBPSI = MFI.CreateFixedObject(4, -8, true);
2021     FI->setPICBasePointerSaveIndex(PBPSI);
2022   }
2023 
2024   // Make sure we don't explicitly spill r31, because, for example, we have
2025   // some inline asm which explicitly clobbers it, when we otherwise have a
2026   // frame pointer and are using r31's spill slot for the prologue/epilogue
2027   // code. Same goes for the base pointer and the PIC base register.
2028   if (needsFP(MF))
2029     SavedRegs.reset(isPPC64 ? PPC::X31 : PPC::R31);
2030   if (RegInfo->hasBasePointer(MF))
2031     SavedRegs.reset(RegInfo->getBaseRegister(MF));
2032   if (FI->usesPICBase())
2033     SavedRegs.reset(PPC::R30);
2034 
  // Reserve stack space into which the linkage area can be moved in case of a
  // tail call.
2036   int TCSPDelta = 0;
2037   if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2038       (TCSPDelta = FI->getTailCallSPDelta()) < 0) {
2039     MFI.CreateFixedObject(-1 * TCSPDelta, TCSPDelta, true);
2040   }
2041 
2042   // Allocate the nonvolatile CR spill slot iff the function uses CR 2, 3, or 4.
  // For 64-bit SVR4 and all flavors of AIX, we create a FixedStack
  // object at the offset of the CR-save slot in the linkage area. The actual
  // save and restore of the condition register will be created as part of the
  // prologue and epilogue insertion, but the FixedStack object is needed to
  // keep the CalleeSavedInfo valid.
2048   if ((SavedRegs.test(PPC::CR2) || SavedRegs.test(PPC::CR3) ||
2049        SavedRegs.test(PPC::CR4))) {
2050     const uint64_t SpillSize = 4; // Condition register is always 4 bytes.
2051     const int64_t SpillOffset =
2052         Subtarget.isPPC64() ? 8 : Subtarget.isAIXABI() ? 4 : -4;
2053     int FrameIdx =
2054         MFI.CreateFixedObject(SpillSize, SpillOffset,
2055                               /* IsImmutable */ true, /* IsAliased */ false);
2056     FI->setCRSpillFrameIndex(FrameIdx);
2057   }
2058 }
2059 
2060 void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
2061                                                        RegScavenger *RS) const {
2062   // Get callee saved register information.
2063   MachineFrameInfo &MFI = MF.getFrameInfo();
2064   const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
2065 
  // If the function is shrink-wrapped and has a tail call, the tail call might
  // not be in the new RestoreBlock, so the real branch instruction won't be
  // generated by emitEpilogue(), because shrink-wrapping has chosen a new
  // RestoreBlock. So we handle this case here.
2070   if (MFI.getSavePoint() && MFI.hasTailCall()) {
2071     MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
2072     for (MachineBasicBlock &MBB : MF) {
2073       if (MBB.isReturnBlock() && (&MBB) != RestoreBlock)
2074         createTailCallBranchInstr(MBB);
2075     }
2076   }
2077 
2078   // Early exit if no callee saved registers are modified!
2079   if (CSI.empty() && !needsFP(MF)) {
2080     addScavengingSpillSlot(MF, RS);
2081     return;
2082   }
2083 
2084   unsigned MinGPR = PPC::R31;
2085   unsigned MinG8R = PPC::X31;
2086   unsigned MinFPR = PPC::F31;
2087   unsigned MinVR = Subtarget.hasSPE() ? PPC::S31 : PPC::V31;
2088 
2089   bool HasGPSaveArea = false;
2090   bool HasG8SaveArea = false;
2091   bool HasFPSaveArea = false;
2092   bool HasVRSaveArea = false;
2093 
2094   SmallVector<CalleeSavedInfo, 18> GPRegs;
2095   SmallVector<CalleeSavedInfo, 18> G8Regs;
2096   SmallVector<CalleeSavedInfo, 18> FPRegs;
2097   SmallVector<CalleeSavedInfo, 18> VRegs;
2098 
2099   for (const CalleeSavedInfo &I : CSI) {
2100     Register Reg = I.getReg();
2101     assert((!MF.getInfo<PPCFunctionInfo>()->mustSaveTOC() ||
2102             (Reg != PPC::X2 && Reg != PPC::R2)) &&
2103            "Not expecting to try to spill R2 in a function that must save TOC");
2104     if (PPC::GPRCRegClass.contains(Reg)) {
2105       HasGPSaveArea = true;
2106 
2107       GPRegs.push_back(I);
2108 
2109       if (Reg < MinGPR) {
2110         MinGPR = Reg;
2111       }
2112     } else if (PPC::G8RCRegClass.contains(Reg)) {
2113       HasG8SaveArea = true;
2114 
2115       G8Regs.push_back(I);
2116 
2117       if (Reg < MinG8R) {
2118         MinG8R = Reg;
2119       }
2120     } else if (PPC::F8RCRegClass.contains(Reg)) {
2121       HasFPSaveArea = true;
2122 
2123       FPRegs.push_back(I);
2124 
2125       if (Reg < MinFPR) {
2126         MinFPR = Reg;
2127       }
2128     } else if (PPC::CRBITRCRegClass.contains(Reg) ||
2129                PPC::CRRCRegClass.contains(Reg)) {
2130       ; // do nothing, as we already know whether CRs are spilled
2131     } else if (PPC::VRRCRegClass.contains(Reg) ||
2132                PPC::SPERCRegClass.contains(Reg)) {
2133       // Altivec and SPE are mutually exclusive, but have the same stack
2134       // alignment requirements, so overload the save area for both cases.
2135       HasVRSaveArea = true;
2136 
2137       VRegs.push_back(I);
2138 
2139       if (Reg < MinVR) {
2140         MinVR = Reg;
2141       }
2142     } else {
2143       llvm_unreachable("Unknown RegisterClass!");
2144     }
2145   }
2146 
2147   PPCFunctionInfo *PFI = MF.getInfo<PPCFunctionInfo>();
2148   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2149 
2150   int64_t LowerBound = 0;
2151 
2152   // Take into account stack space reserved for tail calls.
2153   int TCSPDelta = 0;
2154   if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2155       (TCSPDelta = PFI->getTailCallSPDelta()) < 0) {
2156     LowerBound = TCSPDelta;
2157   }
2158 
2159   // The Floating-point register save area is right below the back chain word
2160   // of the previous stack frame.
2161   if (HasFPSaveArea) {
2162     for (unsigned i = 0, e = FPRegs.size(); i != e; ++i) {
2163       int FI = FPRegs[i].getFrameIdx();
2164 
2165       MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
2166     }
2167 
2168     LowerBound -= (31 - TRI->getEncodingValue(MinFPR) + 1) * 8;
2169   }
2170 
2171   // Check whether the frame pointer register is allocated. If so, make sure it
2172   // is spilled to the correct offset.
2173   if (needsFP(MF)) {
2174     int FI = PFI->getFramePointerSaveIndex();
2175     assert(FI && "No Frame Pointer Save Slot!");
2176     MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
2177     // FP is R31/X31, so no need to update MinGPR/MinG8R.
2178     HasGPSaveArea = true;
2179   }
2180 
2181   if (PFI->usesPICBase()) {
2182     int FI = PFI->getPICBasePointerSaveIndex();
2183     assert(FI && "No PIC Base Pointer Save Slot!");
2184     MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
2185 
2186     MinGPR = std::min<unsigned>(MinGPR, PPC::R30);
2187     HasGPSaveArea = true;
2188   }
2189 
2190   const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
2191   if (RegInfo->hasBasePointer(MF)) {
2192     int FI = PFI->getBasePointerSaveIndex();
2193     assert(FI && "No Base Pointer Save Slot!");
2194     MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
2195 
2196     Register BP = RegInfo->getBaseRegister(MF);
2197     if (PPC::G8RCRegClass.contains(BP)) {
2198       MinG8R = std::min<unsigned>(MinG8R, BP);
2199       HasG8SaveArea = true;
2200     } else if (PPC::GPRCRegClass.contains(BP)) {
2201       MinGPR = std::min<unsigned>(MinGPR, BP);
2202       HasGPSaveArea = true;
2203     }
2204   }
2205 
2206   // General register save area starts right below the Floating-point
2207   // register save area.
2208   if (HasGPSaveArea || HasG8SaveArea) {
2209     // Move general register save area spill slots down, taking into account
2210     // the size of the Floating-point register save area.
2211     for (unsigned i = 0, e = GPRegs.size(); i != e; ++i) {
2212       if (!GPRegs[i].isSpilledToReg()) {
2213         int FI = GPRegs[i].getFrameIdx();
2214         MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
2215       }
2216     }
2217 
    // Likewise, move the 64-bit general register save area spill slots down,
    // taking into account the size of the Floating-point register save area.
2220     for (unsigned i = 0, e = G8Regs.size(); i != e; ++i) {
2221       if (!G8Regs[i].isSpilledToReg()) {
2222         int FI = G8Regs[i].getFrameIdx();
2223         MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
2224       }
2225     }
2226 
2227     unsigned MinReg =
2228       std::min<unsigned>(TRI->getEncodingValue(MinGPR),
2229                          TRI->getEncodingValue(MinG8R));
2230 
2231     const unsigned GPRegSize = Subtarget.isPPC64() ? 8 : 4;
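    // Registers MinReg..31 are saved contiguously, so reserve
    // (31 - MinReg + 1) slots of GPRegSize bytes each.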
2232     LowerBound -= (31 - MinReg + 1) * GPRegSize;
2233   }
2234 
2235   // For 32-bit only, the CR save area is below the general register
2236   // save area.  For 64-bit SVR4, the CR save area is addressed relative
2237   // to the stack pointer and hence does not need an adjustment here.
2238   // Only CR2 (the first nonvolatile spilled) has an associated frame
2239   // index so that we have a single uniform save area.
2240   if (spillsCR(MF) && Subtarget.is32BitELFABI()) {
2241     // Adjust the frame index of the CR spill slot.
2242     for (const auto &CSInfo : CSI) {
2243       if (CSInfo.getReg() == PPC::CR2) {
2244         int FI = CSInfo.getFrameIdx();
2245         MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
2246         break;
2247       }
2248     }
2249 
2250     LowerBound -= 4; // The CR save area is always 4 bytes long.
2251   }
2252 
2253   // Both Altivec and SPE have the same alignment and padding requirements
2254   // within the stack frame.
2255   if (HasVRSaveArea) {
    // Insert alignment padding; we need 16-byte alignment. Note: for a
    // positive number the alignment formula is y = (x + (n - 1)) & ~(n - 1).
    // But since we are using a negative number here (the stack grows
    // downward), we should use the formula y = x & ~(n - 1), where x is the
    // size before aligning, n is the alignment size (n = 16 here), and y is
    // the size after aligning.
    assert(LowerBound <= 0 &&
           "Expect LowerBound to have a non-positive value!");
2262     LowerBound &= ~(15);
2263 
2264     for (unsigned i = 0, e = VRegs.size(); i != e; ++i) {
2265       int FI = VRegs[i].getFrameIdx();
2266 
2267       MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
2268     }
2269   }
2270 
2271   addScavengingSpillSlot(MF, RS);
2272 }
2273 
2274 void
2275 PPCFrameLowering::addScavengingSpillSlot(MachineFunction &MF,
2276                                          RegScavenger *RS) const {
2277   // Reserve a slot closest to SP or frame pointer if we have a dynalloc or
2278   // a large stack, which will require scavenging a register to materialize a
2279   // large offset.
2280 
2281   // We need to have a scavenger spill slot for spills if the frame size is
2282   // large. In case there is no free register for large-offset addressing,
2283   // this slot is used for the necessary emergency spill. Also, we need the
2284   // slot for dynamic stack allocations.
2285 
2286   // The scavenger might be invoked if the frame offset does not fit into
2287   // the 16-bit immediate. We don't know the complete frame size here
2288   // because we've not yet computed callee-saved register spills or the
2289   // needed alignment padding.
2290   unsigned StackSize = determineFrameLayout(MF, true);
2291   MachineFrameInfo &MFI = MF.getFrameInfo();
2292   if (MFI.hasVarSizedObjects() || spillsCR(MF) || hasNonRISpills(MF) ||
2293       (hasSpills(MF) && !isInt<16>(StackSize))) {
2294     const TargetRegisterClass &GPRC = PPC::GPRCRegClass;
2295     const TargetRegisterClass &G8RC = PPC::G8RCRegClass;
2296     const TargetRegisterClass &RC = Subtarget.isPPC64() ? G8RC : GPRC;
2297     const TargetRegisterInfo &TRI = *Subtarget.getRegisterInfo();
2298     unsigned Size = TRI.getSpillSize(RC);
2299     Align Alignment = TRI.getSpillAlign(RC);
2300     RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Alignment, false));
2301 
2302     // Might we have over-aligned allocas?
2303     bool HasAlVars =
2304         MFI.hasVarSizedObjects() && MFI.getMaxAlign() > getStackAlign();
2305 
2306     // These kinds of spills might need two registers.
2307     if (spillsCR(MF) || HasAlVars)
2308       RS->addScavengingFrameIndex(
2309           MFI.CreateStackObject(Size, Alignment, false));
2310   }
2311 }
2312 
// This function checks if a callee-saved GPR can be spilled to a volatile
// vector register. This occurs for leaf functions when the option
// ppc-enable-pe-vector-spills is enabled. If there are any remaining registers
// which were not spilled to vectors, return false so the target-independent
// code can handle them by assigning a FrameIdx to a stack slot.
2318 bool PPCFrameLowering::assignCalleeSavedSpillSlots(
2319     MachineFunction &MF, const TargetRegisterInfo *TRI,
2320     std::vector<CalleeSavedInfo> &CSI) const {
2321 
2322   if (CSI.empty())
2323     return true; // Early exit if no callee saved registers are modified!
2324 
  // Early exit if we cannot spill GPRs to volatile vector registers.
2326   MachineFrameInfo &MFI = MF.getFrameInfo();
2327   if (!EnablePEVectorSpills || MFI.hasCalls() || !Subtarget.hasP9Vector())
2328     return false;
2329 
2330   // Build a BitVector of VSRs that can be used for spilling GPRs.
2331   BitVector BVAllocatable = TRI->getAllocatableSet(MF);
2332   BitVector BVCalleeSaved(TRI->getNumRegs());
2333   const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
2334   const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
2335   for (unsigned i = 0; CSRegs[i]; ++i)
2336     BVCalleeSaved.set(CSRegs[i]);
2337 
2338   for (unsigned Reg : BVAllocatable.set_bits()) {
2339     // Set to 0 if the register is not a volatile VSX register, or if it is
2340     // used in the function.
2341     if (BVCalleeSaved[Reg] || !PPC::VSRCRegClass.contains(Reg) ||
2342         MF.getRegInfo().isPhysRegUsed(Reg))
2343       BVAllocatable.reset(Reg);
2344   }
2345 
2346   bool AllSpilledToReg = true;
2347   unsigned LastVSRUsedForSpill = 0;
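  // When nonzero, the most recently assigned VSR still has room for a second
  // GPR, which can be packed into it with mtvsrdd.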
2348   for (auto &CS : CSI) {
2349     if (BVAllocatable.none())
2350       return false;
2351 
2352     Register Reg = CS.getReg();
2353 
2354     if (!PPC::G8RCRegClass.contains(Reg)) {
2355       AllSpilledToReg = false;
2356       continue;
2357     }
2358 
2359     // For P9, we can reuse LastVSRUsedForSpill to spill two GPRs
2360     // into one VSR using the mtvsrdd instruction.
2361     if (LastVSRUsedForSpill != 0) {
2362       CS.setDstReg(LastVSRUsedForSpill);
2363       BVAllocatable.reset(LastVSRUsedForSpill);
2364       LastVSRUsedForSpill = 0;
2365       continue;
2366     }
2367 
2368     unsigned VolatileVFReg = BVAllocatable.find_first();
2369     if (VolatileVFReg < BVAllocatable.size()) {
2370       CS.setDstReg(VolatileVFReg);
2371       LastVSRUsedForSpill = VolatileVFReg;
2372     } else {
2373       AllSpilledToReg = false;
2374     }
2375   }
2376   return AllSpilledToReg;
2377 }
2378 
2379 bool PPCFrameLowering::spillCalleeSavedRegisters(
2380     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2381     ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2382 
2383   MachineFunction *MF = MBB.getParent();
2384   const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
2385   PPCFunctionInfo *FI = MF->getInfo<PPCFunctionInfo>();
2386   bool MustSaveTOC = FI->mustSaveTOC();
2387   DebugLoc DL;
2388   bool CRSpilled = false;
2389   MachineInstrBuilder CRMIB;
2390   BitVector Spilled(TRI->getNumRegs());
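  // Tracks destination VSRs that have already been written so that a VSR
  // receiving two GPRs is only spilled once.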
2391 
2392   VSRContainingGPRs.clear();
2393 
  // Map each VSR to the GPRs to be spilled into it. A single VSR can contain
  // one or two GPRs, so we need a table to record the information for later
  // save/restore.
2396   llvm::for_each(CSI, [&](const CalleeSavedInfo &Info) {
2397     if (Info.isSpilledToReg()) {
2398       auto &SpilledVSR =
2399           VSRContainingGPRs.FindAndConstruct(Info.getDstReg()).second;
2400       assert(SpilledVSR.second == 0 &&
2401              "Can't spill more than two GPRs into VSR!");
2402       if (SpilledVSR.first == 0)
2403         SpilledVSR.first = Info.getReg();
2404       else
2405         SpilledVSR.second = Info.getReg();
2406     }
2407   });
2408 
2409   for (const CalleeSavedInfo &I : CSI) {
2410     Register Reg = I.getReg();
2411 
2412     // CR2 through CR4 are the nonvolatile CR fields.
2413     bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4;
2414 
2415     // Add the callee-saved register as live-in; it's killed at the spill.
2416     // Do not do this for callee-saved registers that are live-in to the
2417     // function because they will already be marked live-in and this will be
2418     // adding it for a second time. It is an error to add the same register
2419     // to the set more than once.
2420     const MachineRegisterInfo &MRI = MF->getRegInfo();
2421     bool IsLiveIn = MRI.isLiveIn(Reg);
2422     if (!IsLiveIn)
2423        MBB.addLiveIn(Reg);
2424 
2425     if (CRSpilled && IsCRField) {
2426       CRMIB.addReg(Reg, RegState::ImplicitKill);
2427       continue;
2428     }
2429 
2430     // The actual spill will happen in the prologue.
2431     if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
2432       continue;
2433 
2434     // Insert the spill to the stack frame.
2435     if (IsCRField) {
2436       PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>();
2437       if (!Subtarget.is32BitELFABI()) {
2438         // The actual spill will happen at the start of the prologue.
2439         FuncInfo->addMustSaveCR(Reg);
2440       } else {
2441         CRSpilled = true;
2442         FuncInfo->setSpillsCR();
2443 
2444         // 32-bit:  FP-relative.  Note that we made sure CR2-CR4 all have
2445         // the same frame index in PPCRegisterInfo::hasReservedSpillSlot.
2446         CRMIB = BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::R12)
2447                   .addReg(Reg, RegState::ImplicitKill);
2448 
2449         MBB.insert(MI, CRMIB);
2450         MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::STW))
2451                                          .addReg(PPC::R12,
2452                                                  getKillRegState(true)),
2453                                          I.getFrameIdx()));
2454       }
2455     } else {
2456       if (I.isSpilledToReg()) {
2457         unsigned Dst = I.getDstReg();
2458 
2459         if (Spilled[Dst])
2460           continue;
2461 
2462         if (VSRContainingGPRs[Dst].second != 0) {
2463           assert(Subtarget.hasP9Vector() &&
2464                  "mtvsrdd is unavailable on pre-P9 targets.");
2465 
2466           NumPESpillVSR += 2;
2467           BuildMI(MBB, MI, DL, TII.get(PPC::MTVSRDD), Dst)
2468               .addReg(VSRContainingGPRs[Dst].first, getKillRegState(true))
2469               .addReg(VSRContainingGPRs[Dst].second, getKillRegState(true));
2470         } else if (VSRContainingGPRs[Dst].second == 0) {
2471           assert(Subtarget.hasP8Vector() &&
2472                  "Can't move GPR to VSR on pre-P8 targets.");
2473 
2474           ++NumPESpillVSR;
2475           BuildMI(MBB, MI, DL, TII.get(PPC::MTVSRD),
2476                   TRI->getSubReg(Dst, PPC::sub_64))
2477               .addReg(VSRContainingGPRs[Dst].first, getKillRegState(true));
2478         } else {
2479           llvm_unreachable("More than two GPRs spilled to a VSR!");
2480         }
2481         Spilled.set(Dst);
2482       } else {
2483         const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
2484         // Use !IsLiveIn for the kill flag.
2485         // We do not want to kill registers that are live in this function
2486         // before their use because they will become undefined registers.
2487         // Functions without NoUnwind need to preserve the order of elements in
2488         // saved vector registers.
2489         if (Subtarget.needsSwapsForVSXMemOps() &&
2490             !MF->getFunction().hasFnAttribute(Attribute::NoUnwind))
2491           TII.storeRegToStackSlotNoUpd(MBB, MI, Reg, !IsLiveIn,
2492                                        I.getFrameIdx(), RC, TRI);
2493         else
2494           TII.storeRegToStackSlot(MBB, MI, Reg, !IsLiveIn, I.getFrameIdx(),
2495                                   RC, TRI);
2496       }
2497     }
2498   }
2499   return true;
2500 }
2501 
2502 static void restoreCRs(bool is31, bool CR2Spilled, bool CR3Spilled,
2503                        bool CR4Spilled, MachineBasicBlock &MBB,
2504                        MachineBasicBlock::iterator MI,
2505                        ArrayRef<CalleeSavedInfo> CSI, unsigned CSIIndex) {
2506 
2507   MachineFunction *MF = MBB.getParent();
2508   const PPCInstrInfo &TII = *MF->getSubtarget<PPCSubtarget>().getInstrInfo();
2509   DebugLoc DL;
2510   unsigned MoveReg = PPC::R12;
2511 
2512   // 32-bit:  FP-relative
2513   MBB.insert(MI,
2514              addFrameReference(BuildMI(*MF, DL, TII.get(PPC::LWZ), MoveReg),
2515                                CSI[CSIIndex].getFrameIdx()));
2516 
2517   unsigned RestoreOp = PPC::MTOCRF;
2518   if (CR2Spilled)
2519     MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR2)
2520                .addReg(MoveReg, getKillRegState(!CR3Spilled && !CR4Spilled)));
2521 
2522   if (CR3Spilled)
2523     MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR3)
2524                .addReg(MoveReg, getKillRegState(!CR4Spilled)));
2525 
2526   if (CR4Spilled)
2527     MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR4)
2528                .addReg(MoveReg, getKillRegState(true)));
2529 }
2530 
2531 MachineBasicBlock::iterator PPCFrameLowering::
2532 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
2533                               MachineBasicBlock::iterator I) const {
2534   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2535   if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2536       I->getOpcode() == PPC::ADJCALLSTACKUP) {
2537     // Add (actually subtract) back the amount the callee popped on return.
2538     if (int CalleeAmt =  I->getOperand(1).getImm()) {
2539       bool is64Bit = Subtarget.isPPC64();
2540       CalleeAmt *= -1;
2541       unsigned StackReg = is64Bit ? PPC::X1 : PPC::R1;
2542       unsigned TmpReg = is64Bit ? PPC::X0 : PPC::R0;
2543       unsigned ADDIInstr = is64Bit ? PPC::ADDI8 : PPC::ADDI;
2544       unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4;
2545       unsigned LISInstr = is64Bit ? PPC::LIS8 : PPC::LIS;
2546       unsigned ORIInstr = is64Bit ? PPC::ORI8 : PPC::ORI;
2547       const DebugLoc &dl = I->getDebugLoc();
2548 
2549       if (isInt<16>(CalleeAmt)) {
2550         BuildMI(MBB, I, dl, TII.get(ADDIInstr), StackReg)
2551           .addReg(StackReg, RegState::Kill)
2552           .addImm(CalleeAmt);
2553       } else {
2554         MachineBasicBlock::iterator MBBI = I;
2555         BuildMI(MBB, MBBI, dl, TII.get(LISInstr), TmpReg)
2556           .addImm(CalleeAmt >> 16);
2557         BuildMI(MBB, MBBI, dl, TII.get(ORIInstr), TmpReg)
2558           .addReg(TmpReg, RegState::Kill)
2559           .addImm(CalleeAmt & 0xFFFF);
2560         BuildMI(MBB, MBBI, dl, TII.get(ADDInstr), StackReg)
2561           .addReg(StackReg, RegState::Kill)
2562           .addReg(TmpReg);
2563       }
2564     }
2565   }
2566   // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
2567   return MBB.erase(I);
2568 }
2569 
2570 static bool isCalleeSavedCR(unsigned Reg) {
2571   return PPC::CR2 == Reg || Reg == PPC::CR3 || Reg == PPC::CR4;
2572 }
2573 
bool PPCFrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  PPCFunctionInfo *FI = MF->getInfo<PPCFunctionInfo>();
  bool MustSaveTOC = FI->mustSaveTOC();
  bool CR2Spilled = false;
  bool CR3Spilled = false;
  bool CR4Spilled = false;
  unsigned CSIIndex = 0;
  BitVector Restored(TRI->getNumRegs());

  // Initialize insertion-point logic; we will be restoring in reverse
  // order of spill.
  MachineBasicBlock::iterator I = MI, BeforeI = I;
  bool AtStart = I == MBB.begin();

  if (!AtStart)
    --BeforeI;

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    Register Reg = CSI[i].getReg();

    if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
      continue;

    // Restores of callee-saved condition register fields are handled during
    // epilogue insertion.
    if (isCalleeSavedCR(Reg) && !Subtarget.is32BitELFABI())
      continue;

    if (Reg == PPC::CR2) {
      CR2Spilled = true;
      // The spill slot is associated only with CR2, which is the first
      // nonvolatile CR field spilled. Remember its index here.
      CSIIndex = i;
      continue;
    } else if (Reg == PPC::CR3) {
      CR3Spilled = true;
      continue;
    } else if (Reg == PPC::CR4) {
      CR4Spilled = true;
      continue;
    } else {
      // On 32-bit ELF, when we first encounter a non-CR register after seeing
      // at least one CR register, restore all of the spilled CRs together.
      if (CR2Spilled || CR3Spilled || CR4Spilled) {
        bool is31 = needsFP(*MF);
        restoreCRs(is31, CR2Spilled, CR3Spilled, CR4Spilled, MBB, I, CSI,
                   CSIIndex);
        CR2Spilled = CR3Spilled = CR4Spilled = false;
      }

      if (CSI[i].isSpilledToReg()) {
        DebugLoc DL;
        unsigned Dst = CSI[i].getDstReg();

        if (Restored[Dst])
          continue;

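        // Move the GPR contents back out of the VSR they were parked in by
        // spillCalleeSavedRegisters: a nonzero second entry means two GPRs
        // share this VSR (high and low doublewords, Power9 only), so both
        // mfvsrd and mfvsrld are needed; otherwise a single mfvsrd recovers
        // the lone GPR.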
        if (VSRContainingGPRs[Dst].second != 0) {
          assert(Subtarget.hasP9Vector());
          NumPEReloadVSR += 2;
          BuildMI(MBB, I, DL, TII.get(PPC::MFVSRLD),
                  VSRContainingGPRs[Dst].second)
              .addReg(Dst);
          BuildMI(MBB, I, DL, TII.get(PPC::MFVSRD),
                  VSRContainingGPRs[Dst].first)
              .addReg(TRI->getSubReg(Dst, PPC::sub_64), getKillRegState(true));
        } else if (VSRContainingGPRs[Dst].second == 0) {
          assert(Subtarget.hasP8Vector());
          ++NumPEReloadVSR;
          BuildMI(MBB, I, DL, TII.get(PPC::MFVSRD),
                  VSRContainingGPRs[Dst].first)
              .addReg(TRI->getSubReg(Dst, PPC::sub_64), getKillRegState(true));
        } else {
          llvm_unreachable("More than two GPRs spilled to a VSR!");
        }

        Restored.set(Dst);

      } else {
        // Default behavior for non-CR saves.
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);

        // Functions without NoUnwind need to preserve the order of elements in
        // saved vector registers.
        if (Subtarget.needsSwapsForVSXMemOps() &&
            !MF->getFunction().hasFnAttribute(Attribute::NoUnwind))
          TII.loadRegFromStackSlotNoUpd(MBB, I, Reg, CSI[i].getFrameIdx(), RC,
                                        TRI);
        else
          TII.loadRegFromStackSlot(MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI);

        assert(I != MBB.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
      }
    }

    // Reset the insertion point so the next restore is emitted ahead of the
    // ones already inserted, i.e. in reverse order of the spills.
    if (AtStart)
      I = MBB.begin();
    else {
      I = BeforeI;
      ++I;
    }
  }

  // If we have seen spilled CR fields but not yet restored them, do so now.
  if (CR2Spilled || CR3Spilled || CR4Spilled) {
    assert(Subtarget.is32BitELFABI() &&
           "Only set CR[2|3|4]Spilled on 32-bit SVR4.");
    bool is31 = needsFP(*MF);
    restoreCRs(is31, CR2Spilled, CR3Spilled, CR4Spilled, MBB, I, CSI, CSIIndex);
  }

  return true;
}

uint64_t PPCFrameLowering::getTOCSaveOffset() const {
  return TOCSaveOffset;
}

uint64_t PPCFrameLowering::getFramePointerSaveOffset() const {
  return FramePointerSaveOffset;
}

uint64_t PPCFrameLowering::getBasePointerSaveOffset() const {
  return BasePointerSaveOffset;
}

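// Shrink-wrapping lets the prologue/epilogue be placed on only the paths that
// actually need a frame. It is allowed unless this function has explicitly
// disabled it or we are targeting 32-bit ELF, which keeps the prologue and
// epilogue at the function boundaries.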
bool PPCFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
  if (MF.getInfo<PPCFunctionInfo>()->shrinkWrapDisabled())
    return false;
  return !MF.getSubtarget<PPCSubtarget>().is32BitELFABI();
}