1 //===-- PPCRegisterInfo.cpp - PowerPC Register Information ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the PowerPC implementation of the TargetRegisterInfo
10 // class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PPCRegisterInfo.h"
15 #include "PPCFrameLowering.h"
16 #include "PPCInstrBuilder.h"
17 #include "PPCMachineFunctionInfo.h"
18 #include "PPCSubtarget.h"
19 #include "PPCTargetMachine.h"
20 #include "llvm/ADT/BitVector.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineModuleInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/RegisterScavenging.h"
29 #include "llvm/CodeGen/TargetFrameLowering.h"
30 #include "llvm/CodeGen/TargetInstrInfo.h"
31 #include "llvm/IR/CallingConv.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/Type.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/MathExtras.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/Target/TargetMachine.h"
41 #include "llvm/Target/TargetOptions.h"
42 #include <cstdlib>
43 
44 using namespace llvm;
45 
46 #define DEBUG_TYPE "reginfo"
47 
48 #define GET_REGINFO_TARGET_DESC
49 #include "PPCGenRegisterInfo.inc"
50 
51 STATISTIC(InflateGPRC, "Number of gprc inputs for getLargestLegalSuperClass");
52 STATISTIC(InflateGP8RC, "Number of g8rc inputs for getLargestLegalSuperClass");
53 
54 static cl::opt<bool>
55 EnableBasePointer("ppc-use-base-pointer", cl::Hidden, cl::init(true),
56          cl::desc("Enable use of a base pointer for complex stack frames"));
57 
58 static cl::opt<bool>
59 AlwaysBasePointer("ppc-always-use-base-pointer", cl::Hidden, cl::init(false),
60          cl::desc("Force the use of a base pointer in every function"));
61 
62 static cl::opt<bool>
63 EnableGPRToVecSpills("ppc-enable-gpr-to-vsr-spills", cl::Hidden, cl::init(false),
64          cl::desc("Enable spills from gpr to vsr rather than stack"));
65 
66 static cl::opt<bool>
67 StackPtrConst("ppc-stack-ptr-caller-preserved",
68                 cl::desc("Consider R1 caller preserved so stack saves of "
69                          "caller preserved registers can be LICM candidates"),
70                 cl::init(true), cl::Hidden);
71 
72 static cl::opt<unsigned>
73 MaxCRBitSpillDist("ppc-max-crbit-spill-dist",
74                   cl::desc("Maximum search distance for definition of CR bit "
75                            "spill on ppc"),
76                   cl::Hidden, cl::init(100));
77 
78 // Copies/moves of physical accumulators are expensive operations
79 // that should be avoided whenever possible. MMA instructions are
80 // meant to be used in performance-sensitive computational kernels.
81 // This option is provided, at least for the time being, to give the
82 // user a tool to detect this expensive operation and either rework
83 // their code or report a compiler bug if that turns out to be the
84 // cause.
85 #ifndef NDEBUG
86 static cl::opt<bool>
87 ReportAccMoves("ppc-report-acc-moves",
88                cl::desc("Emit information about accumulator register spills "
89                         "and copies"),
90                cl::Hidden, cl::init(false));
91 #endif
92 
93 static unsigned offsetMinAlignForOpcode(unsigned OpC);
94 
95 PPCRegisterInfo::PPCRegisterInfo(const PPCTargetMachine &TM)
96   : PPCGenRegisterInfo(TM.isPPC64() ? PPC::LR8 : PPC::LR,
97                        TM.isPPC64() ? 0 : 1,
98                        TM.isPPC64() ? 0 : 1),
99     TM(TM) {
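  // ImmToIdxMap maps a D-form (immediate displacement) memory opcode to its
  // X-form (indexed) equivalent. eliminateFrameIndex consults this map when a
  // frame offset does not fit the displacement field; roughly (an illustrative
  // sketch, not emitted verbatim):
  //   lwz r3, 40000(r1)   ->   <materialize 40000 in a scavenged register rT>
  //                            lwzx r3, r1, rT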
100   ImmToIdxMap[PPC::LD]   = PPC::LDX;    ImmToIdxMap[PPC::STD]  = PPC::STDX;
101   ImmToIdxMap[PPC::LBZ]  = PPC::LBZX;   ImmToIdxMap[PPC::STB]  = PPC::STBX;
102   ImmToIdxMap[PPC::LHZ]  = PPC::LHZX;   ImmToIdxMap[PPC::LHA]  = PPC::LHAX;
103   ImmToIdxMap[PPC::LWZ]  = PPC::LWZX;   ImmToIdxMap[PPC::LWA]  = PPC::LWAX;
104   ImmToIdxMap[PPC::LFS]  = PPC::LFSX;   ImmToIdxMap[PPC::LFD]  = PPC::LFDX;
105   ImmToIdxMap[PPC::STH]  = PPC::STHX;   ImmToIdxMap[PPC::STW]  = PPC::STWX;
106   ImmToIdxMap[PPC::STFS] = PPC::STFSX;  ImmToIdxMap[PPC::STFD] = PPC::STFDX;
107   ImmToIdxMap[PPC::ADDI] = PPC::ADD4;
108   ImmToIdxMap[PPC::LWA_32] = PPC::LWAX_32;
109 
110   // 64-bit
111   ImmToIdxMap[PPC::LHA8] = PPC::LHAX8; ImmToIdxMap[PPC::LBZ8] = PPC::LBZX8;
112   ImmToIdxMap[PPC::LHZ8] = PPC::LHZX8; ImmToIdxMap[PPC::LWZ8] = PPC::LWZX8;
113   ImmToIdxMap[PPC::STB8] = PPC::STBX8; ImmToIdxMap[PPC::STH8] = PPC::STHX8;
114   ImmToIdxMap[PPC::STW8] = PPC::STWX8; ImmToIdxMap[PPC::STDU] = PPC::STDUX;
115   ImmToIdxMap[PPC::ADDI8] = PPC::ADD8;
116 
117   // VSX
118   ImmToIdxMap[PPC::DFLOADf32] = PPC::LXSSPX;
119   ImmToIdxMap[PPC::DFLOADf64] = PPC::LXSDX;
120   ImmToIdxMap[PPC::SPILLTOVSR_LD] = PPC::SPILLTOVSR_LDX;
121   ImmToIdxMap[PPC::SPILLTOVSR_ST] = PPC::SPILLTOVSR_STX;
122   ImmToIdxMap[PPC::DFSTOREf32] = PPC::STXSSPX;
123   ImmToIdxMap[PPC::DFSTOREf64] = PPC::STXSDX;
124   ImmToIdxMap[PPC::LXV] = PPC::LXVX;
125   ImmToIdxMap[PPC::LXSD] = PPC::LXSDX;
126   ImmToIdxMap[PPC::LXSSP] = PPC::LXSSPX;
127   ImmToIdxMap[PPC::STXV] = PPC::STXVX;
128   ImmToIdxMap[PPC::STXSD] = PPC::STXSDX;
129   ImmToIdxMap[PPC::STXSSP] = PPC::STXSSPX;
130 
131   // SPE
132   ImmToIdxMap[PPC::EVLDD] = PPC::EVLDDX;
133   ImmToIdxMap[PPC::EVSTDD] = PPC::EVSTDDX;
134   ImmToIdxMap[PPC::SPESTW] = PPC::SPESTWX;
135   ImmToIdxMap[PPC::SPELWZ] = PPC::SPELWZX;
136 
137   // Power10
138   ImmToIdxMap[PPC::LXVP]   = PPC::LXVPX;
139   ImmToIdxMap[PPC::STXVP]  = PPC::STXVPX;
140   ImmToIdxMap[PPC::PLXVP]  = PPC::LXVPX;
141   ImmToIdxMap[PPC::PSTXVP] = PPC::STXVPX;
142 }
143 
144 /// getPointerRegClass - Return the register class to use to hold pointers.
145 /// This is used for addressing modes.
146 const TargetRegisterClass *
147 PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
148                                                                        const {
149   // Note that PPCInstrInfo::FoldImmediate also directly uses this Kind value
150   // when it checks for ZERO folding.
151   if (Kind == 1) {
152     if (TM.isPPC64())
153       return &PPC::G8RC_NOX0RegClass;
154     return &PPC::GPRC_NOR0RegClass;
155   }
156 
157   if (TM.isPPC64())
158     return &PPC::G8RCRegClass;
159   return &PPC::GPRCRegClass;
160 }
161 
162 const MCPhysReg*
163 PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
164   const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>();
165   if (MF->getFunction().getCallingConv() == CallingConv::AnyReg) {
166     if (!TM.isPPC64() && Subtarget.isAIXABI())
167       report_fatal_error("AnyReg unimplemented on 32-bit AIX.");
168     if (Subtarget.hasVSX()) {
169       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
170         return CSR_64_AllRegs_AIX_Dflt_VSX_SaveList;
171       return CSR_64_AllRegs_VSX_SaveList;
172     }
173     if (Subtarget.hasAltivec()) {
174       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
175         return CSR_64_AllRegs_AIX_Dflt_Altivec_SaveList;
176       return CSR_64_AllRegs_Altivec_SaveList;
177     }
178     return CSR_64_AllRegs_SaveList;
179   }
180 
181   // On PPC64, we might need to save r2 (but only if it is not reserved).
182   // We do not need to treat R2 as callee-saved when using PC-Relative calls
183   // because any direct uses of R2 will cause it to be reserved. If the function
184   // is a leaf or the only uses of R2 are implicit uses for calls, the calls
185   // will use the @notoc relocation which will cause this function to set the
186   // st_other bit to 1, thereby communicating to its caller that it arbitrarily
187   // clobbers the TOC.
188   bool SaveR2 = MF->getRegInfo().isAllocatable(PPC::X2) &&
189                 !Subtarget.isUsingPCRelativeCalls();
190 
191   // Cold calling convention CSRs.
192   if (MF->getFunction().getCallingConv() == CallingConv::Cold) {
193     if (Subtarget.isAIXABI())
194       report_fatal_error("Cold calling unimplemented on AIX.");
195     if (TM.isPPC64()) {
196       if (Subtarget.hasAltivec())
197         return SaveR2 ? CSR_SVR64_ColdCC_R2_Altivec_SaveList
198                       : CSR_SVR64_ColdCC_Altivec_SaveList;
199       return SaveR2 ? CSR_SVR64_ColdCC_R2_SaveList
200                     : CSR_SVR64_ColdCC_SaveList;
201     }
202     // 32-bit targets.
203     if (Subtarget.hasAltivec())
204       return CSR_SVR32_ColdCC_Altivec_SaveList;
205     else if (Subtarget.hasSPE())
206       return CSR_SVR32_ColdCC_SPE_SaveList;
207     return CSR_SVR32_ColdCC_SaveList;
208   }
209   // Standard calling convention CSRs.
210   if (TM.isPPC64()) {
211     if (Subtarget.hasAltivec() &&
212         (!Subtarget.isAIXABI() || TM.getAIXExtendedAltivecABI())) {
213       return SaveR2 ? CSR_PPC64_R2_Altivec_SaveList
214                     : CSR_PPC64_Altivec_SaveList;
215     }
216     return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList;
217   }
218   // 32-bit targets.
219   if (Subtarget.isAIXABI()) {
220     if (Subtarget.hasAltivec())
221       return TM.getAIXExtendedAltivecABI() ? CSR_AIX32_Altivec_SaveList
222                                            : CSR_AIX32_SaveList;
223     return CSR_AIX32_SaveList;
224   }
225   if (Subtarget.hasAltivec())
226     return CSR_SVR432_Altivec_SaveList;
227   else if (Subtarget.hasSPE())
228     return CSR_SVR432_SPE_SaveList;
229   return CSR_SVR432_SaveList;
230 }
231 
232 const uint32_t *
233 PPCRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
234                                       CallingConv::ID CC) const {
235   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
236   if (CC == CallingConv::AnyReg) {
237     if (Subtarget.hasVSX()) {
238       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
239         return CSR_64_AllRegs_AIX_Dflt_VSX_RegMask;
240       return CSR_64_AllRegs_VSX_RegMask;
241     }
242     if (Subtarget.hasAltivec()) {
243       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
244         return CSR_64_AllRegs_AIX_Dflt_Altivec_RegMask;
245       return CSR_64_AllRegs_Altivec_RegMask;
246     }
247     return CSR_64_AllRegs_RegMask;
248   }
249 
250   if (Subtarget.isAIXABI()) {
251     return TM.isPPC64()
252                ? ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
253                       ? CSR_PPC64_Altivec_RegMask
254                       : CSR_PPC64_RegMask)
255                : ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
256                       ? CSR_AIX32_Altivec_RegMask
257                       : CSR_AIX32_RegMask);
258   }
259 
260   if (CC == CallingConv::Cold) {
261     return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_SVR64_ColdCC_Altivec_RegMask
262                                                   : CSR_SVR64_ColdCC_RegMask)
263                         : (Subtarget.hasAltivec() ? CSR_SVR32_ColdCC_Altivec_RegMask
264                                                   : (Subtarget.hasSPE()
265                                                   ? CSR_SVR32_ColdCC_SPE_RegMask
266                                                   : CSR_SVR32_ColdCC_RegMask));
267   }
268 
269   return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_PPC64_Altivec_RegMask
270                                                 : CSR_PPC64_RegMask)
271                       : (Subtarget.hasAltivec()
272                              ? CSR_SVR432_Altivec_RegMask
273                              : (Subtarget.hasSPE() ? CSR_SVR432_SPE_RegMask
274                                                    : CSR_SVR432_RegMask));
275 }
276 
277 const uint32_t*
278 PPCRegisterInfo::getNoPreservedMask() const {
279   return CSR_NoRegs_RegMask;
280 }
281 
282 void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
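  // For illustration of the bit math below: a register whose (hypothetical)
  // enum value were 75 would clear bit 75 % 32 == 11 of word 75 / 32 == 2,
  // i.e. Mask[2] &= ~(1u << 11);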
283   for (unsigned PseudoReg : {PPC::ZERO, PPC::ZERO8, PPC::RM})
284     Mask[PseudoReg / 32] &= ~(1u << (PseudoReg % 32));
285 }
286 
287 BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
288   BitVector Reserved(getNumRegs());
289   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
290   const PPCFrameLowering *TFI = getFrameLowering(MF);
291 
292   // The ZERO register is not really a register, but the representation of r0
293   // when used in instructions that treat r0 as the constant 0.
294   markSuperRegs(Reserved, PPC::ZERO);
295 
296   // The FP register is also not really a register, but is the representation
297   // of the frame pointer register used by ISD::FRAMEADDR.
298   markSuperRegs(Reserved, PPC::FP);
299 
300   // The BP register is also not really a register, but is the representation
301   // of the base pointer register used by setjmp.
302   markSuperRegs(Reserved, PPC::BP);
303 
304   // The counter registers must be reserved so that counter-based loops can
305   // be correctly formed (and the mtctr instructions are not DCE'd).
306   markSuperRegs(Reserved, PPC::CTR);
307   markSuperRegs(Reserved, PPC::CTR8);
308 
309   markSuperRegs(Reserved, PPC::R1);
310   markSuperRegs(Reserved, PPC::LR);
311   markSuperRegs(Reserved, PPC::LR8);
312   markSuperRegs(Reserved, PPC::RM);
313 
314   markSuperRegs(Reserved, PPC::VRSAVE);
315 
316   // The SVR4 ABI reserves r2 and r13
317   if (Subtarget.isSVR4ABI()) {
318     // We only reserve r2 if we need to use the TOC pointer. If we have no
319     // explicit uses of the TOC pointer (meaning we're a leaf function with
320     // no constant-pool loads, etc.) and we have no potential uses inside an
321     // inline asm block, then we can treat r2 as an ordinary callee-saved
322     // register.
323     const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
324     if (!TM.isPPC64() || FuncInfo->usesTOCBasePtr() || MF.hasInlineAsm())
325       markSuperRegs(Reserved, PPC::R2);  // System-reserved register
326     markSuperRegs(Reserved, PPC::R13); // Small Data Area pointer register
327   }
328 
329   // Always reserve r2 on AIX for now.
330   // TODO: Make r2 allocatable on AIX/XCOFF for some leaf functions.
331   if (Subtarget.isAIXABI())
332     markSuperRegs(Reserved, PPC::R2);  // System-reserved register
333 
334   // On PPC64, r13 is the thread pointer. Never allocate this register.
335   if (TM.isPPC64())
336     markSuperRegs(Reserved, PPC::R13);
337 
338   if (TFI->needsFP(MF))
339     markSuperRegs(Reserved, PPC::R31);
340 
341   bool IsPositionIndependent = TM.isPositionIndependent();
342   if (hasBasePointer(MF)) {
343     if (Subtarget.is32BitELFABI() && IsPositionIndependent)
344       markSuperRegs(Reserved, PPC::R29);
345     else
346       markSuperRegs(Reserved, PPC::R30);
347   }
348 
349   if (Subtarget.is32BitELFABI() && IsPositionIndependent)
350     markSuperRegs(Reserved, PPC::R30);
351 
352   // Reserve Altivec registers when Altivec is unavailable.
353   if (!Subtarget.hasAltivec())
354     for (TargetRegisterClass::iterator I = PPC::VRRCRegClass.begin(),
355          IE = PPC::VRRCRegClass.end(); I != IE; ++I)
356       markSuperRegs(Reserved, *I);
357 
358   if (Subtarget.isAIXABI() && Subtarget.hasAltivec() &&
359       !TM.getAIXExtendedAltivecABI()) {
360     //  In the AIX default Altivec ABI, vector registers VR20-VR31 are reserved
361     //  and cannot be used.
362     for (auto Reg : CSR_Altivec_SaveList) {
363       if (Reg == 0)
364         break;
365       markSuperRegs(Reserved, Reg);
366       for (MCRegAliasIterator AS(Reg, this, true); AS.isValid(); ++AS) {
367         Reserved.set(*AS);
368       }
369     }
370   }
371 
372   assert(checkAllSuperRegsMarked(Reserved));
373   return Reserved;
374 }
375 
376 bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
377   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
378   const PPCInstrInfo *InstrInfo =  Subtarget.getInstrInfo();
379   const MachineFrameInfo &MFI = MF.getFrameInfo();
380   const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo();
381 
382   LLVM_DEBUG(dbgs() << "requiresFrameIndexScavenging for " << MF.getName()
383                     << ".\n");
384   // If the callee saved info is invalid we have to default to true for safety.
385   if (!MFI.isCalleeSavedInfoValid()) {
386     LLVM_DEBUG(dbgs() << "TRUE - Invalid callee saved info.\n");
387     return true;
388   }
389 
390   // We will require the use of X-Forms when the frame is larger than what can
391   // be represented by the signed 16-bit immediate of a D-Form instruction.
392   // If we need an X-Form then we need a register to store the address offset.
393   unsigned FrameSize = MFI.getStackSize();
394   // A signed 16-bit immediate means a (non-negative) FrameSize must fit in 15 bits.
395   if (FrameSize & ~0x7FFF) {
396     LLVM_DEBUG(dbgs() << "TRUE - Frame size is too large for D-Form.\n");
397     return true;
398   }
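  // For example, a frame of 0x9000 bytes has bits set above 0x7FFF, so a
  // D-Form displacement cannot reach all of it and a register must be
  // scavenged to form X-Form addresses; a frame of 0x4000 bytes still passes
  // the check above.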
399 
400   // The callee saved info is valid so it can be traversed.
401   // Check for registers that need saving but do not have load or store forms
402   // where the address offset is an immediate.
403   for (unsigned i = 0; i < Info.size(); i++) {
404     // If the spill is to a register no scavenging is required.
405     if (Info[i].isSpilledToReg())
406       continue;
407 
408     int FrIdx = Info[i].getFrameIdx();
409     unsigned Reg = Info[i].getReg();
410 
411     const TargetRegisterClass *RC = getMinimalPhysRegClass(Reg);
412     unsigned Opcode = InstrInfo->getStoreOpcodeForSpill(RC);
413     if (!MFI.isFixedObjectIndex(FrIdx)) {
414       // This is not a fixed object. If it requires alignment then we may still
415       // need to use the X-Form.
416       if (offsetMinAlignForOpcode(Opcode) > 1) {
417         LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
418                           << " for register " << printReg(Reg, this) << ".\n");
419         LLVM_DEBUG(dbgs() << "TRUE - Not fixed frame object that requires "
420                           << "alignment.\n");
421         return true;
422       }
423     }
424 
425     // This is either:
426     // 1) A fixed frame index object, which we know is aligned, so as long as
427     // we have a valid D-Form/DS-Form/DQ-Form (non X-Form) we do not need to
428     // consider the alignment here.
429     // 2) A non-fixed object, in which case we know from the previous check
430     // that the minimum required alignment is no more than 1.
431     if (InstrInfo->isXFormMemOp(Opcode)) {
432       LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
433                         << " for register " << printReg(Reg, this) << ".\n");
434       LLVM_DEBUG(dbgs() << "TRUE - Memory operand is X-Form.\n");
435       return true;
436     }
437   }
438   LLVM_DEBUG(dbgs() << "FALSE - Scavenging is not required.\n");
439   return false;
440 }
441 
442 bool PPCRegisterInfo::requiresVirtualBaseRegisters(
443     const MachineFunction &MF) const {
444   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
445   // Do not use virtual base registers when ROP protection is turned on.
446   // Virtual base registers break the layout of the local variable space and may
447   // push the ROP Hash location past the 512 byte range of the ROP store
448   // instruction.
449   return !Subtarget.hasROPProtect();
450 }
451 
452 bool PPCRegisterInfo::isCallerPreservedPhysReg(MCRegister PhysReg,
453                                                const MachineFunction &MF) const {
454   assert(Register::isPhysicalRegister(PhysReg));
455   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
456   const MachineFrameInfo &MFI = MF.getFrameInfo();
457 
458   if (!Subtarget.is64BitELFABI() && !Subtarget.isAIXABI())
459     return false;
460   if (PhysReg == Subtarget.getTOCPointerRegister())
461     // X2/R2 is guaranteed to be preserved within a function if it is reserved.
462     // The reason it's reserved is that it's the TOC pointer (and the function
463     // uses the TOC). In functions where it isn't reserved (i.e. leaf functions
464     // with no TOC access), we can't claim that it is preserved.
465     return (getReservedRegs(MF).test(PhysReg));
466   if (StackPtrConst && PhysReg == Subtarget.getStackPointerRegister() &&
467       !MFI.hasVarSizedObjects() && !MFI.hasOpaqueSPAdjustment())
468     // The value of the stack pointer does not change within a function after
469     // the prologue and before the epilogue if there are no dynamic allocations
470     // and no inline asm which clobbers X1/R1.
471     return true;
472   return false;
473 }
474 
475 bool PPCRegisterInfo::getRegAllocationHints(Register VirtReg,
476                                             ArrayRef<MCPhysReg> Order,
477                                             SmallVectorImpl<MCPhysReg> &Hints,
478                                             const MachineFunction &MF,
479                                             const VirtRegMap *VRM,
480                                             const LiveRegMatrix *Matrix) const {
481   const MachineRegisterInfo *MRI = &MF.getRegInfo();
482 
483   // Call the base implementation first to set any hints based on the usual
484   // heuristics and decide what the return value should be. We want to return
485   // the same value returned by the base implementation. If the base
486   // implementation decides to return true and force the allocation then we
487   // will leave it as such. On the other hand if the base implementation
488   // decides to return false the following code will not force the allocation
489   // as we are just looking to provide a hint.
490   bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
491       VirtReg, Order, Hints, MF, VRM, Matrix);
492   // We are interested in instructions that copy values to ACC/UACC.
493   // The copy into UACC will simply be a COPY to a subreg, so we want to
494   // allocate the corresponding physical subreg for the source.
495   // The copy into ACC will be a BUILD_UACC, so we want to allocate the UACC
496   // with the same register number for the source (e.g. ACC3 hints UACC3).
497   for (MachineInstr &Use : MRI->reg_nodbg_instructions(VirtReg)) {
498     const MachineOperand *ResultOp = nullptr;
499     Register ResultReg;
500     switch (Use.getOpcode()) {
501     case TargetOpcode::COPY: {
502       ResultOp = &Use.getOperand(0);
503       ResultReg = ResultOp->getReg();
504       if (Register::isVirtualRegister(ResultReg) &&
505           MRI->getRegClass(ResultReg)->contains(PPC::UACC0) &&
506           VRM->hasPhys(ResultReg)) {
507         Register UACCPhys = VRM->getPhys(ResultReg);
508         Register HintReg = getSubReg(UACCPhys, ResultOp->getSubReg());
509         // Ensure that the hint is a VSRp register.
510         if (HintReg >= PPC::VSRp0 && HintReg <= PPC::VSRp31)
511           Hints.push_back(HintReg);
512       }
513       break;
514     }
515     case PPC::BUILD_UACC: {
516       ResultOp = &Use.getOperand(0);
517       ResultReg = ResultOp->getReg();
518       if (MRI->getRegClass(ResultReg)->contains(PPC::ACC0) &&
519           VRM->hasPhys(ResultReg)) {
520         Register ACCPhys = VRM->getPhys(ResultReg);
521         assert((ACCPhys >= PPC::ACC0 && ACCPhys <= PPC::ACC7) &&
522                "Expecting an ACC register for BUILD_UACC.");
523         Register HintReg = PPC::UACC0 + (ACCPhys - PPC::ACC0);
524         Hints.push_back(HintReg);
525       }
526       break;
527     }
528     }
529   }
530   return BaseImplRetVal;
531 }
532 
533 unsigned PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
534                                               MachineFunction &MF) const {
535   const PPCFrameLowering *TFI = getFrameLowering(MF);
536   const unsigned DefaultSafety = 1;
537 
538   switch (RC->getID()) {
539   default:
540     return 0;
541   case PPC::G8RC_NOX0RegClassID:
542   case PPC::GPRC_NOR0RegClassID:
543   case PPC::SPERCRegClassID:
544   case PPC::G8RCRegClassID:
545   case PPC::GPRCRegClassID: {
546     unsigned FP = TFI->hasFP(MF) ? 1 : 0;
547     return 32 - FP - DefaultSafety;
548   }
549   case PPC::F4RCRegClassID:
550   case PPC::F8RCRegClassID:
551   case PPC::VSLRCRegClassID:
552     return 32 - DefaultSafety;
553   case PPC::VFRCRegClassID:
554   case PPC::VRRCRegClassID: {
555     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
556     // Vector registers VR20-VR31 are reserved and cannot be used in the default
557     // Altivec ABI on AIX.
558     if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
559       return 20 - DefaultSafety;
560   }
561     return 32 - DefaultSafety;
562   case PPC::VSFRCRegClassID:
563   case PPC::VSSRCRegClassID:
564   case PPC::VSRCRegClassID: {
565     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
566     if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
567       // Vector registers VR20-VR31 are reserved and cannot be used in the
568       // default Altivec ABI on AIX.
569       return 52 - DefaultSafety;
570   }
571     return 64 - DefaultSafety;
572   case PPC::CRRCRegClassID:
573     return 8 - DefaultSafety;
574   }
575 }
576 
577 const TargetRegisterClass *
578 PPCRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
579                                            const MachineFunction &MF) const {
580   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
581   const auto *DefaultSuperclass =
582       TargetRegisterInfo::getLargestLegalSuperClass(RC, MF);
583   if (Subtarget.hasVSX()) {
584     // With VSX, we can inflate various sub-register classes to the full VSX
585     // register set.
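    // For example, an f64 value constrained to F8RC can usually be inflated to
    // VSFRC (the 64 VSX scalar registers overlapping the FPRs and the Altivec
    // registers), giving the allocator more freedom; VSSRC is only used when
    // P8 vector instructions are available, and VSRp/ACC/UACC only with paired
    // memops or MMA, as checked below.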
586 
587     // For Power9 we allow the user to enable GPR to vector spills.
588     // FIXME: Currently limited to spilling GP8RC. A follow on patch will add
589     // support to spill GPRC.
590     if (TM.isELFv2ABI() || Subtarget.isAIXABI()) {
591       if (Subtarget.hasP9Vector() && EnableGPRToVecSpills &&
592           RC == &PPC::G8RCRegClass) {
593         InflateGP8RC++;
594         return &PPC::SPILLTOVSRRCRegClass;
595       }
596       if (RC == &PPC::GPRCRegClass && EnableGPRToVecSpills)
597         InflateGPRC++;
598     }
599 
600     for (const auto *I = RC->getSuperClasses(); *I; ++I) {
601       if (getRegSizeInBits(**I) != getRegSizeInBits(*RC))
602         continue;
603 
604       switch ((*I)->getID()) {
605       case PPC::VSSRCRegClassID:
606         return Subtarget.hasP8Vector() ? *I : DefaultSuperclass;
607       case PPC::VSFRCRegClassID:
608       case PPC::VSRCRegClassID:
609         return *I;
610       case PPC::VSRpRCRegClassID:
611         return Subtarget.pairedVectorMemops() ? *I : DefaultSuperclass;
612       case PPC::ACCRCRegClassID:
613       case PPC::UACCRCRegClassID:
614         return Subtarget.hasMMA() ? *I : DefaultSuperclass;
615       }
616     }
617   }
618 
619   return DefaultSuperclass;
620 }
621 
622 //===----------------------------------------------------------------------===//
623 // Stack Frame Processing methods
624 //===----------------------------------------------------------------------===//
625 
626 /// lowerDynamicAlloc - Generate the code for allocating an object in the
627 /// current frame.  The sequence of code will be in the general form
628 ///
629 ///   addi   R0, SP, \#frameSize ; get the address of the previous frame
630 ///   stwux  R0, SP, Rnegsize   ; add and update the SP with the negated size
631 ///   addi   Rnew, SP, \#maxCallFrameSize ; get the top of the allocation
632 ///
633 void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {
634   // Get the instruction.
635   MachineInstr &MI = *II;
636   // Get the instruction's basic block.
637   MachineBasicBlock &MBB = *MI.getParent();
638   // Get the basic block's function.
639   MachineFunction &MF = *MBB.getParent();
640   // Get the frame info.
641   MachineFrameInfo &MFI = MF.getFrameInfo();
642   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
643   // Get the instruction info.
644   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
645   // Determine whether 64-bit pointers are used.
646   bool LP64 = TM.isPPC64();
647   DebugLoc dl = MI.getDebugLoc();
648 
649   // Get the maximum call stack size.
650   unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
651   Align MaxAlign = MFI.getMaxAlign();
652   assert(isAligned(MaxAlign, maxCallFrameSize) &&
653          "Maximum call-frame size not sufficiently aligned");
654   (void)MaxAlign;
655 
656   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
657   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
658   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
659   bool KillNegSizeReg = MI.getOperand(1).isKill();
660   Register NegSizeReg = MI.getOperand(1).getReg();
661 
662   prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, Reg);
663   // Grow the stack and update the stack pointer link, then determine the
664   // address of the newly allocated space.
665   if (LP64) {
666     BuildMI(MBB, II, dl, TII.get(PPC::STDUX), PPC::X1)
667         .addReg(Reg, RegState::Kill)
668         .addReg(PPC::X1)
669         .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
670     BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
671         .addReg(PPC::X1)
672         .addImm(maxCallFrameSize);
673   } else {
674     BuildMI(MBB, II, dl, TII.get(PPC::STWUX), PPC::R1)
675         .addReg(Reg, RegState::Kill)
676         .addReg(PPC::R1)
677         .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
678     BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
679         .addReg(PPC::R1)
680         .addImm(maxCallFrameSize);
681   }
682 
683   // Discard the DYNALLOC instruction.
684   MBB.erase(II);
685 }
686 
687 /// To accomplish dynamic stack allocation, we compute the exact size to
688 /// subtract from the stack pointer according to the alignment information,
689 /// and we fetch the previous frame's address.
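/// A rough sketch (not emitted verbatim) of the extra rounding applied when
/// the required alignment exceeds the target stack alignment, e.g. for
/// MaxAlign == 32:
///
///   li   rT, -32                ; ~(MaxAlign - 1)
///   and  rNegSize, rNegSize, rT ; round the negated size down, i.e. round the
///                               ; allocation up to a multiple of MaxAlign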
690 void PPCRegisterInfo::prepareDynamicAlloca(MachineBasicBlock::iterator II,
691                                            Register &NegSizeReg,
692                                            bool &KillNegSizeReg,
693                                            Register &FramePointer) const {
694   // Get the instruction.
695   MachineInstr &MI = *II;
696   // Get the instruction's basic block.
697   MachineBasicBlock &MBB = *MI.getParent();
698   // Get the basic block's function.
699   MachineFunction &MF = *MBB.getParent();
700   // Get the frame info.
701   MachineFrameInfo &MFI = MF.getFrameInfo();
702   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
703   // Get the instruction info.
704   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
705   // Determine whether 64-bit pointers are used.
706   bool LP64 = TM.isPPC64();
707   DebugLoc dl = MI.getDebugLoc();
708   // Get the total frame size.
709   unsigned FrameSize = MFI.getStackSize();
710 
711   // Get stack alignments.
712   const PPCFrameLowering *TFI = getFrameLowering(MF);
713   Align TargetAlign = TFI->getStackAlign();
714   Align MaxAlign = MFI.getMaxAlign();
715 
716   // Determine the previous frame's address.  If FrameSize can't be
717   // represented as 16 bits or we need special alignment, then we load the
718   // previous frame's address from 0(SP).  Why not do an addis of the hi?
719   // Because R0 is our only safe tmp register and addi/addis treat R0 as zero.
720   // Constructing the constant and adding would take 3 instructions.
721   // Fortunately, a frame greater than 32K is rare.
722   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
723   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
724 
725   if (MaxAlign < TargetAlign && isInt<16>(FrameSize)) {
726     if (LP64)
727       BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), FramePointer)
728           .addReg(PPC::X31)
729           .addImm(FrameSize);
730     else
731       BuildMI(MBB, II, dl, TII.get(PPC::ADDI), FramePointer)
732           .addReg(PPC::R31)
733           .addImm(FrameSize);
734   } else if (LP64) {
735     BuildMI(MBB, II, dl, TII.get(PPC::LD), FramePointer)
736         .addImm(0)
737         .addReg(PPC::X1);
738   } else {
739     BuildMI(MBB, II, dl, TII.get(PPC::LWZ), FramePointer)
740         .addImm(0)
741         .addReg(PPC::R1);
742   }
743   // Determine the actual NegSizeReg according to alignment info.
744   if (LP64) {
745     if (MaxAlign > TargetAlign) {
746       unsigned UnalNegSizeReg = NegSizeReg;
747       NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
748 
749       // Unfortunately, there is no andi, only andi., and we can't insert that
750       // here because we might clobber cr0 while it is live.
751       BuildMI(MBB, II, dl, TII.get(PPC::LI8), NegSizeReg)
752           .addImm(~(MaxAlign.value() - 1));
753 
754       unsigned NegSizeReg1 = NegSizeReg;
755       NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
756       BuildMI(MBB, II, dl, TII.get(PPC::AND8), NegSizeReg)
757           .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
758           .addReg(NegSizeReg1, RegState::Kill);
759       KillNegSizeReg = true;
760     }
761   } else {
762     if (MaxAlign > TargetAlign) {
763       unsigned UnalNegSizeReg = NegSizeReg;
764       NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
765 
766       // Unfortunately, there is no andi, only andi., and we can't insert that
767       // here because we might clobber cr0 while it is live.
768       BuildMI(MBB, II, dl, TII.get(PPC::LI), NegSizeReg)
769           .addImm(~(MaxAlign.value() - 1));
770 
771       unsigned NegSizeReg1 = NegSizeReg;
772       NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
773       BuildMI(MBB, II, dl, TII.get(PPC::AND), NegSizeReg)
774           .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
775           .addReg(NegSizeReg1, RegState::Kill);
776       KillNegSizeReg = true;
777     }
778   }
779 }
780 
781 void PPCRegisterInfo::lowerPrepareProbedAlloca(
782     MachineBasicBlock::iterator II) const {
783   MachineInstr &MI = *II;
784   // Get the instruction's basic block.
785   MachineBasicBlock &MBB = *MI.getParent();
786   // Get the basic block's function.
787   MachineFunction &MF = *MBB.getParent();
788   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
789   // Get the instruction info.
790   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
791   // Determine whether 64-bit pointers are used.
792   bool LP64 = TM.isPPC64();
793   DebugLoc dl = MI.getDebugLoc();
794   Register FramePointer = MI.getOperand(0).getReg();
795   const Register ActualNegSizeReg = MI.getOperand(1).getReg();
796   bool KillNegSizeReg = MI.getOperand(2).isKill();
797   Register NegSizeReg = MI.getOperand(2).getReg();
798   const MCInstrDesc &CopyInst = TII.get(LP64 ? PPC::OR8 : PPC::OR);
799   // The register allocator may allocate FramePointer and NegSizeReg to the same physreg.
800   if (FramePointer == NegSizeReg) {
801     assert(KillNegSizeReg && "FramePointer is a def and NegSizeReg is a use, "
802                              "NegSizeReg should be killed");
803     // FramePointer is clobbered before NegSizeReg is used in
804     // prepareDynamicAlloca, so save NegSizeReg in ActualNegSizeReg to avoid
805     // reading a clobbered value.
806     BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
807         .addReg(NegSizeReg)
808         .addReg(NegSizeReg);
809     NegSizeReg = ActualNegSizeReg;
810     KillNegSizeReg = false;
811   }
812   prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, FramePointer);
813   // NegSizeReg might be updated in prepareDynamicAlloca if MaxAlign >
814   // TargetAlign.
815   if (NegSizeReg != ActualNegSizeReg)
816     BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
817         .addReg(NegSizeReg)
818         .addReg(NegSizeReg);
819   MBB.erase(II);
820 }
821 
822 void PPCRegisterInfo::lowerDynamicAreaOffset(
823     MachineBasicBlock::iterator II) const {
824   // Get the instruction.
825   MachineInstr &MI = *II;
826   // Get the instruction's basic block.
827   MachineBasicBlock &MBB = *MI.getParent();
828   // Get the basic block's function.
829   MachineFunction &MF = *MBB.getParent();
830   // Get the frame info.
831   MachineFrameInfo &MFI = MF.getFrameInfo();
832   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
833   // Get the instruction info.
834   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
835 
836   unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
837   bool is64Bit = TM.isPPC64();
838   DebugLoc dl = MI.getDebugLoc();
839   BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI),
840           MI.getOperand(0).getReg())
841       .addImm(maxCallFrameSize);
842   MBB.erase(II);
843 }
844 
845 /// lowerCRSpilling - Generate the code for spilling a CR register. Instead of
846 /// reserving a whole register (R0), we scrounge for one here. This generates
847 /// code like this:
848 ///
849 ///   mfcr rA                  ; Move the conditional register into GPR rA.
850 ///   rlwinm rA, rA, SB, 0, 31 ; Shift the bits left so they are in CR0's slot.
851 ///   stw rA, FI               ; Store rA to the frame.
852 ///
853 void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
854                                       unsigned FrameIndex) const {
855   // Get the instruction.
856   MachineInstr &MI = *II;       // ; SPILL_CR <SrcReg>, <offset>
857   // Get the instruction's basic block.
858   MachineBasicBlock &MBB = *MI.getParent();
859   MachineFunction &MF = *MBB.getParent();
860   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
861   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
862   DebugLoc dl = MI.getDebugLoc();
863 
864   bool LP64 = TM.isPPC64();
865   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
866   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
867 
868   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
869   Register SrcReg = MI.getOperand(0).getReg();
870 
871   // We need to store the CR in the low 4 bits of the saved value. First, issue
872   // an MFOCRF to save all of the CRBits and, if needed, kill the SrcReg.
873   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
874       .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));
875 
876   // If the saved register wasn't CR0, shift the bits left so that they are in
877   // CR0's slot.
878   if (SrcReg != PPC::CR0) {
879     Register Reg1 = Reg;
880     Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
881 
882     // rlwinm rA, rA, ShiftBits, 0, 31.
883     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
884       .addReg(Reg1, RegState::Kill)
885       .addImm(getEncodingValue(SrcReg) * 4)
886       .addImm(0)
887       .addImm(31);
888   }
889 
890   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
891                     .addReg(Reg, RegState::Kill),
892                     FrameIndex);
893 
894   // Discard the pseudo instruction.
895   MBB.erase(II);
896 }
897 
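/// lowerCRRestore - Generate the code for reloading a CR register, the inverse
/// of lowerCRSpilling. A rough sketch of the expansion is:
///
///   lwz rA, FI                    ; Reload the saved CR image from the frame.
///   rlwinm rA, rA, 32-SB, 0, 31   ; Rotate the bits back into DestReg's slot
///                                 ; (skipped when DestReg is CR0).
///   mtocrf DestReg, rA            ; Move the field back into the CR.
///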
898 void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
899                                       unsigned FrameIndex) const {
900   // Get the instruction.
901   MachineInstr &MI = *II;       // ; <DestReg> = RESTORE_CR <offset>
902   // Get the instruction's basic block.
903   MachineBasicBlock &MBB = *MI.getParent();
904   MachineFunction &MF = *MBB.getParent();
905   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
906   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
907   DebugLoc dl = MI.getDebugLoc();
908 
909   bool LP64 = TM.isPPC64();
910   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
911   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
912 
913   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
914   Register DestReg = MI.getOperand(0).getReg();
915   assert(MI.definesRegister(DestReg) &&
916     "RESTORE_CR does not define its destination");
917 
918   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
919                               Reg), FrameIndex);
920 
921   // If the reloaded register isn't CR0, shift the bits right so that they are
922   // in the right CR's slot.
923   if (DestReg != PPC::CR0) {
924     Register Reg1 = Reg;
925     Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
926 
927     unsigned ShiftBits = getEncodingValue(DestReg)*4;
928     // rlwinm r11, r11, 32-ShiftBits, 0, 31.
929     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
930              .addReg(Reg1, RegState::Kill).addImm(32-ShiftBits).addImm(0)
931              .addImm(31);
932   }
933 
934   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), DestReg)
935              .addReg(Reg, RegState::Kill);
936 
937   // Discard the pseudo instruction.
938   MBB.erase(II);
939 }
940 
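/// lowerCRBitSpilling - Generate the code for spilling a single CR bit. A
/// rough sketch of the generic (pre-Power9) expansion is:
///
///   mfocrf rA, CRField      ; Copy the CR field containing the bit into rA.
///   rlwinm rA, rA, SB, 0, 0 ; Rotate the bit into bit 0 and mask out the rest.
///   stw rA, FI              ; Store rA to the frame.
///
/// Power9 can use setb for the LT bits, Power10 can use setnbc for any CR bit,
/// and bits with statically known values (crset/crunset) are spilled as
/// immediates.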
941 void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II,
942                                          unsigned FrameIndex) const {
943   // Get the instruction.
944   MachineInstr &MI = *II;       // ; SPILL_CRBIT <SrcReg>, <offset>
945   // Get the instruction's basic block.
946   MachineBasicBlock &MBB = *MI.getParent();
947   MachineFunction &MF = *MBB.getParent();
948   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
949   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
950   const TargetRegisterInfo* TRI = Subtarget.getRegisterInfo();
951   DebugLoc dl = MI.getDebugLoc();
952 
953   bool LP64 = TM.isPPC64();
954   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
955   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
956 
957   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
958   Register SrcReg = MI.getOperand(0).getReg();
959 
960   // Search up the BB to find the definition of the CR bit.
961   MachineBasicBlock::reverse_iterator Ins = MI;
962   MachineBasicBlock::reverse_iterator Rend = MBB.rend();
963   ++Ins;
964   unsigned CRBitSpillDistance = 0;
965   bool SeenUse = false;
966   for (; Ins != Rend; ++Ins) {
967     // Definition found.
968     if (Ins->modifiesRegister(SrcReg, TRI))
969       break;
970     // Use found.
971     if (Ins->readsRegister(SrcReg, TRI))
972       SeenUse = true;
973     // Unable to find CR bit definition within maximum search distance.
974     if (CRBitSpillDistance == MaxCRBitSpillDist) {
975       Ins = MI;
976       break;
977     }
978     // Skip debug instructions when counting CR bit spill distance.
979     if (!Ins->isDebugInstr())
980       CRBitSpillDistance++;
981   }
982 
983   // Unable to find the definition of the CR bit in the MBB.
984   if (Ins == MBB.rend())
985     Ins = MI;
986 
987   bool SpillsKnownBit = false;
988   // There is no need to extract the CR bit if its value is already known.
989   switch (Ins->getOpcode()) {
990   case PPC::CRUNSET:
991     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LI8 : PPC::LI), Reg)
992       .addImm(0);
993     SpillsKnownBit = true;
994     break;
995   case PPC::CRSET:
996     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LIS8 : PPC::LIS), Reg)
997       .addImm(-32768);
998     SpillsKnownBit = true;
999     break;
1000   default:
1001     // On Power10, we can use SETNBC to spill all CR bits. SETNBC will set all
1002     // bits (specifically, it produces a -1 if the CR bit is set). Ultimately,
1003     // the bit that is of importance to us is bit 32 (bit 0 of a 32-bit
1004     // register), and SETNBC will set this.
1005     if (Subtarget.isISA3_1()) {
1006       BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETNBC8 : PPC::SETNBC), Reg)
1007           .addReg(SrcReg, RegState::Undef);
1008       break;
1009     }
1010 
1011     // On Power9, we can use SETB to extract the LT bit. This only works for
1012     // the LT bit since SETB produces -1/1/0 for LT/GT/<neither>. So the value
1013     // of the bit we care about (32-bit sign bit) will be set to the value of
1014     // the LT bit (regardless of the other bits in the CR field).
1015     if (Subtarget.isISA3_0()) {
1016       if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR1LT ||
1017           SrcReg == PPC::CR2LT || SrcReg == PPC::CR3LT ||
1018           SrcReg == PPC::CR4LT || SrcReg == PPC::CR5LT ||
1019           SrcReg == PPC::CR6LT || SrcReg == PPC::CR7LT) {
1020         BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETB8 : PPC::SETB), Reg)
1021           .addReg(getCRFromCRBit(SrcReg), RegState::Undef);
1022         break;
1023       }
1024     }
1025 
1026     // We need to move the CR field that contains the CR bit we are spilling.
1027     // The super register may not be explicitly defined (i.e. it can be defined
1028     // by a CR-logical that only defines the subreg) so we state that the CR
1029     // field is undef. Also, in order to preserve the kill flag on the CR bit,
1030     // we add it as an implicit use.
1031     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
1032       .addReg(getCRFromCRBit(SrcReg), RegState::Undef)
1033       .addReg(SrcReg,
1034               RegState::Implicit | getKillRegState(MI.getOperand(0).isKill()));
1035 
1036     // If the saved register wasn't CR0LT, shift the bits left so that the bit
1037     // to store is the first one. Mask all but that bit.
1038     Register Reg1 = Reg;
1039     Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1040 
1041     // rlwinm rA, rA, ShiftBits, 0, 0.
1042     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
1043       .addReg(Reg1, RegState::Kill)
1044       .addImm(getEncodingValue(SrcReg))
1045       .addImm(0).addImm(0);
1046   }
1047   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
1048                     .addReg(Reg, RegState::Kill),
1049                     FrameIndex);
1050 
1051   bool KillsCRBit = MI.killsRegister(SrcReg, TRI);
1052   // Discard the pseudo instruction.
1053   MBB.erase(II);
1054   if (SpillsKnownBit && KillsCRBit && !SeenUse) {
1055     Ins->setDesc(TII.get(PPC::UNENCODED_NOP));
1056     Ins->RemoveOperand(0);
1057   }
1058 }
1059 
1060 void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II,
1061                                       unsigned FrameIndex) const {
1062   // Get the instruction.
1063   MachineInstr &MI = *II;       // ; <DestReg> = RESTORE_CRBIT <offset>
1064   // Get the instruction's basic block.
1065   MachineBasicBlock &MBB = *MI.getParent();
1066   MachineFunction &MF = *MBB.getParent();
1067   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1068   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1069   DebugLoc dl = MI.getDebugLoc();
1070 
1071   bool LP64 = TM.isPPC64();
1072   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
1073   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
1074 
1075   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1076   Register DestReg = MI.getOperand(0).getReg();
1077   assert(MI.definesRegister(DestReg) &&
1078     "RESTORE_CRBIT does not define its destination");
1079 
1080   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
1081                               Reg), FrameIndex);
1082 
1083   BuildMI(MBB, II, dl, TII.get(TargetOpcode::IMPLICIT_DEF), DestReg);
1084 
1085   Register RegO = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1086   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), RegO)
1087           .addReg(getCRFromCRBit(DestReg));
1088 
1089   unsigned ShiftBits = getEncodingValue(DestReg);
1090   // rlwimi r11, r10, 32-ShiftBits, ..., ...
1091   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWIMI8 : PPC::RLWIMI), RegO)
1092       .addReg(RegO, RegState::Kill)
1093       .addReg(Reg, RegState::Kill)
1094       .addImm(ShiftBits ? 32 - ShiftBits : 0)
1095       .addImm(ShiftBits)
1096       .addImm(ShiftBits);
1097 
1098   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF),
1099           getCRFromCRBit(DestReg))
1100       .addReg(RegO, RegState::Kill)
1101       // Make sure we have a use dependency all the way through this
1102       // sequence of instructions. We can't have the other bits in the CR
1103       // modified in between the mfocrf and the mtocrf.
1104       .addReg(getCRFromCRBit(DestReg), RegState::Implicit);
1105 
1106   // Discard the pseudo instruction.
1107   MBB.erase(II);
1108 }
1109 
1110 void PPCRegisterInfo::emitAccCopyInfo(MachineBasicBlock &MBB,
1111                                       MCRegister DestReg, MCRegister SrcReg) {
1112 #ifdef NDEBUG
1113   return;
1114 #else
1115   if (ReportAccMoves) {
1116     std::string Dest = PPC::ACCRCRegClass.contains(DestReg) ? "acc" : "uacc";
1117     std::string Src = PPC::ACCRCRegClass.contains(SrcReg) ? "acc" : "uacc";
1118     dbgs() << "Emitting copy from " << Src << " to " << Dest << ":\n";
1119     MBB.dump();
1120   }
1121 #endif
1122 }
1123 
1124 static void emitAccSpillRestoreInfo(MachineBasicBlock &MBB, bool IsPrimed,
1125                                     bool IsRestore) {
1126 #ifdef NDEBUG
1127   return;
1128 #else
1129   if (ReportAccMoves) {
1130     dbgs() << "Emitting " << (IsPrimed ? "acc" : "uacc") << " register "
1131            << (IsRestore ? "restore" : "spill") << ":\n";
1132     MBB.dump();
1133   }
1134 #endif
1135 }
1136 
1137 /// lowerACCSpilling - Generate the code for spilling the accumulator register.
1138 /// Similarly to other spills/reloads that use pseudo-ops, we do not actually
1139 /// eliminate the FrameIndex here nor compute the stack offset. We simply
1140 /// create a real instruction with an FI and rely on eliminateFrameIndex to
1141 /// handle the FI elimination.
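/// A rough sketch of the expansion for a primed accumulator accN on a
/// little-endian subtarget (the two offsets are swapped on big-endian) is:
///
///   xxmfacc accN            ; De-prime the accumulator.
///   stxvp vsp(2N),  32(FI)  ; Store the two VSR pairs that make up accN.
///   stxvp vsp(2N+1), 0(FI)
///   xxmtacc accN            ; Re-prime if the source is not killed.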
1142 void PPCRegisterInfo::lowerACCSpilling(MachineBasicBlock::iterator II,
1143                                        unsigned FrameIndex) const {
1144   MachineInstr &MI = *II; // SPILL_ACC <SrcReg>, <offset>
1145   MachineBasicBlock &MBB = *MI.getParent();
1146   MachineFunction &MF = *MBB.getParent();
1147   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1148   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1149   DebugLoc DL = MI.getDebugLoc();
1150   Register SrcReg = MI.getOperand(0).getReg();
1151   bool IsKilled = MI.getOperand(0).isKill();
1152 
1153   bool IsPrimed = PPC::ACCRCRegClass.contains(SrcReg);
1154   Register Reg =
1155       PPC::VSRp0 + (SrcReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;
1156   bool IsLittleEndian = Subtarget.isLittleEndian();
1157 
1158   emitAccSpillRestoreInfo(MBB, IsPrimed, false);
1159 
1160   // De-prime the register being spilled, create two stores for the pair
1161   // subregisters accounting for endianness and then re-prime the register if
1162   // it isn't killed.  This uses the Offset parameter to addFrameReference() to
1163   // adjust the offset of the store that is within the 64-byte stack slot.
1164   if (IsPrimed)
1165     BuildMI(MBB, II, DL, TII.get(PPC::XXMFACC), SrcReg).addReg(SrcReg);
1166   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
1167                         .addReg(Reg, getKillRegState(IsKilled)),
1168                     FrameIndex, IsLittleEndian ? 32 : 0);
1169   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
1170                         .addReg(Reg + 1, getKillRegState(IsKilled)),
1171                     FrameIndex, IsLittleEndian ? 0 : 32);
1172   if (IsPrimed && !IsKilled)
1173     BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), SrcReg).addReg(SrcReg);
1174 
1175   // Discard the pseudo instruction.
1176   MBB.erase(II);
1177 }
1178 
1179 /// lowerACCRestore - Generate the code to restore the accumulator register.
1180 void PPCRegisterInfo::lowerACCRestore(MachineBasicBlock::iterator II,
1181                                       unsigned FrameIndex) const {
1182   MachineInstr &MI = *II; // <DestReg> = RESTORE_ACC <offset>
1183   MachineBasicBlock &MBB = *MI.getParent();
1184   MachineFunction &MF = *MBB.getParent();
1185   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1186   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1187   DebugLoc DL = MI.getDebugLoc();
1188 
1189   Register DestReg = MI.getOperand(0).getReg();
1190   assert(MI.definesRegister(DestReg) &&
1191          "RESTORE_ACC does not define its destination");
1192 
1193   bool IsPrimed = PPC::ACCRCRegClass.contains(DestReg);
1194   Register Reg =
1195       PPC::VSRp0 + (DestReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;
1196   bool IsLittleEndian = Subtarget.isLittleEndian();
1197 
1198   emitAccSpillRestoreInfo(MBB, IsPrimed, true);
1199 
1200   // Create two loads for the pair subregisters accounting for endianness and
1201   // then prime the accumulator register being restored.
1202   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), Reg),
1203                     FrameIndex, IsLittleEndian ? 32 : 0);
1204   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), Reg + 1),
1205                     FrameIndex, IsLittleEndian ? 0 : 32);
1206   if (IsPrimed)
1207     BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), DestReg).addReg(DestReg);
1208 
1209   // Discard the pseudo instruction.
1210   MBB.erase(II);
1211 }
1212 
1213 /// lowerQuadwordSpilling - Generate code to spill a paired general register.
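/// The pair is stored as two doublewords; a rough sketch for a little-endian
/// target (offsets swapped on big-endian), with source pair g8pN, is:
///
///   std r(2N),   8(FI)
///   std r(2N+1), 0(FI)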
1214 void PPCRegisterInfo::lowerQuadwordSpilling(MachineBasicBlock::iterator II,
1215                                             unsigned FrameIndex) const {
1216   MachineInstr &MI = *II;
1217   MachineBasicBlock &MBB = *MI.getParent();
1218   MachineFunction &MF = *MBB.getParent();
1219   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1220   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1221   DebugLoc DL = MI.getDebugLoc();
1222 
1223   Register SrcReg = MI.getOperand(0).getReg();
1224   bool IsKilled = MI.getOperand(0).isKill();
1225 
1226   Register Reg = PPC::X0 + (SrcReg - PPC::G8p0) * 2;
1227   bool IsLittleEndian = Subtarget.isLittleEndian();
1228 
1229   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STD))
1230                         .addReg(Reg, getKillRegState(IsKilled)),
1231                     FrameIndex, IsLittleEndian ? 8 : 0);
1232   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STD))
1233                         .addReg(Reg + 1, getKillRegState(IsKilled)),
1234                     FrameIndex, IsLittleEndian ? 0 : 8);
1235 
1236   // Discard the pseudo instruction.
1237   MBB.erase(II);
1238 }
1239 
1240 /// lowerQuadwordRestore - Generate code to restore a paired general register.
1241 void PPCRegisterInfo::lowerQuadwordRestore(MachineBasicBlock::iterator II,
1242                                            unsigned FrameIndex) const {
1243   MachineInstr &MI = *II;
1244   MachineBasicBlock &MBB = *MI.getParent();
1245   MachineFunction &MF = *MBB.getParent();
1246   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1247   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1248   DebugLoc DL = MI.getDebugLoc();
1249 
1250   Register DestReg = MI.getOperand(0).getReg();
1251   assert(MI.definesRegister(DestReg) &&
1252          "RESTORE_QUADWORD does not define its destination");
1253 
1254   Register Reg = PPC::X0 + (DestReg - PPC::G8p0) * 2;
1255   bool IsLittleEndian = Subtarget.isLittleEndian();
1256 
1257   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LD), Reg), FrameIndex,
1258                     IsLittleEndian ? 8 : 0);
1259   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LD), Reg + 1), FrameIndex,
1260                     IsLittleEndian ? 0 : 8);
1261 
1262   // Discard the pseudo instruction.
1263   MBB.erase(II);
1264 }
1265 
1266 bool PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
1267                                            Register Reg, int &FrameIdx) const {
1268   // For the nonvolatile condition registers (CR2, CR3, CR4) return true to
1269   // prevent allocating an additional frame slot.
1270   // For 64-bit ELF and AIX, the CR save area is in the linkage area at SP+8;
1271   // for 32-bit AIX the CR save area is in the linkage area at SP+4.
1272   // We have created a FrameIndex for that spill slot to keep the
1273   // CalleeSavedInfo entries valid.
1274   // For 32-bit ELF, we have previously created the stack slot if needed, so
1275   // return its FrameIdx.
1276   if (PPC::CR2 <= Reg && Reg <= PPC::CR4) {
1277     FrameIdx = MF.getInfo<PPCFunctionInfo>()->getCRSpillFrameIndex();
1278     return true;
1279   }
1280   return false;
1281 }
1282 
1283 // If the offset must be a multiple of some value, return what that value is.
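// DS-form memory ops (e.g. LD/STD/LWA) require a multiple of 4, DQ-form ops
// (e.g. LXV/STXV/LQ) a multiple of 16, and SPE EVLDD/EVSTDD a multiple of 8.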
1284 static unsigned offsetMinAlignForOpcode(unsigned OpC) {
1285   switch (OpC) {
1286   default:
1287     return 1;
1288   case PPC::LWA:
1289   case PPC::LWA_32:
1290   case PPC::LD:
1291   case PPC::LDU:
1292   case PPC::STD:
1293   case PPC::STDU:
1294   case PPC::DFLOADf32:
1295   case PPC::DFLOADf64:
1296   case PPC::DFSTOREf32:
1297   case PPC::DFSTOREf64:
1298   case PPC::LXSD:
1299   case PPC::LXSSP:
1300   case PPC::STXSD:
1301   case PPC::STXSSP:
1302   case PPC::STQ:
1303     return 4;
1304   case PPC::EVLDD:
1305   case PPC::EVSTDD:
1306     return 8;
1307   case PPC::LXV:
1308   case PPC::STXV:
1309   case PPC::LQ:
1310   case PPC::LXVP:
1311   case PPC::STXVP:
1312     return 16;
1313   }
1314 }
1315 
1316 // If the offset must be a multiple of some value, return what that value is.
1317 static unsigned offsetMinAlign(const MachineInstr &MI) {
1318   unsigned OpC = MI.getOpcode();
1319   return offsetMinAlignForOpcode(OpC);
1320 }
1321 
1322 // Return the OffsetOperandNo given the FIOperandNum (and the instruction).
1323 static unsigned getOffsetONFromFION(const MachineInstr &MI,
1324                                     unsigned FIOperandNum) {
1325   // Take into account whether it's an add or mem instruction
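  // (memory forms have the FrameIndex at operand 2 and the offset at operand
  // 1, while add-immediate forms have the FrameIndex at operand 1 and the
  // offset at operand 2).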
1326   unsigned OffsetOperandNo = (FIOperandNum == 2) ? 1 : 2;
1327   if (MI.isInlineAsm())
1328     OffsetOperandNo = FIOperandNum - 1;
1329   else if (MI.getOpcode() == TargetOpcode::STACKMAP ||
1330            MI.getOpcode() == TargetOpcode::PATCHPOINT)
1331     OffsetOperandNo = FIOperandNum + 1;
1332 
1333   return OffsetOperandNo;
1334 }
1335 
1336 void
1337 PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
1338                                      int SPAdj, unsigned FIOperandNum,
1339                                      RegScavenger *RS) const {
1340   assert(SPAdj == 0 && "Unexpected SP adjustment");
1341 
1342   // Get the instruction.
1343   MachineInstr &MI = *II;
1344   // Get the instruction's basic block.
1345   MachineBasicBlock &MBB = *MI.getParent();
1346   // Get the basic block's function.
1347   MachineFunction &MF = *MBB.getParent();
1348   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1349   // Get the instruction info.
1350   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1351   // Get the frame info.
1352   MachineFrameInfo &MFI = MF.getFrameInfo();
1353   DebugLoc dl = MI.getDebugLoc();
1354 
1355   unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
1356 
1357   // Get the frame index.
1358   int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
1359 
1360   // Get the frame pointer save index.  Users of this index are primarily
1361   // DYNALLOC instructions.
1362   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1363   int FPSI = FI->getFramePointerSaveIndex();
1364   // Get the instruction opcode.
1365   unsigned OpC = MI.getOpcode();
1366 
1367   if (OpC == PPC::DYNAREAOFFSET || OpC == PPC::DYNAREAOFFSET8) {
1368     lowerDynamicAreaOffset(II);
1369     return;
1370   }
1371 
1372   // Special case for dynamic alloca.
1373   if (FPSI && FrameIndex == FPSI &&
1374       (OpC == PPC::DYNALLOC || OpC == PPC::DYNALLOC8)) {
1375     lowerDynamicAlloc(II);
1376     return;
1377   }
1378 
1379   if (FPSI && FrameIndex == FPSI &&
1380       (OpC == PPC::PREPARE_PROBED_ALLOCA_64 ||
1381        OpC == PPC::PREPARE_PROBED_ALLOCA_32 ||
1382        OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64 ||
1383        OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32)) {
1384     lowerPrepareProbedAlloca(II);
1385     return;
1386   }
1387 
1388   // Special case for pseudo-ops SPILL_CR and RESTORE_CR, etc.
1389   if (OpC == PPC::SPILL_CR) {
1390     lowerCRSpilling(II, FrameIndex);
1391     return;
1392   } else if (OpC == PPC::RESTORE_CR) {
1393     lowerCRRestore(II, FrameIndex);
1394     return;
1395   } else if (OpC == PPC::SPILL_CRBIT) {
1396     lowerCRBitSpilling(II, FrameIndex);
1397     return;
1398   } else if (OpC == PPC::RESTORE_CRBIT) {
1399     lowerCRBitRestore(II, FrameIndex);
1400     return;
1401   } else if (OpC == PPC::SPILL_ACC || OpC == PPC::SPILL_UACC) {
1402     lowerACCSpilling(II, FrameIndex);
1403     return;
1404   } else if (OpC == PPC::RESTORE_ACC || OpC == PPC::RESTORE_UACC) {
1405     lowerACCRestore(II, FrameIndex);
1406     return;
1407   } else if (OpC == PPC::SPILL_QUADWORD) {
1408     lowerQuadwordSpilling(II, FrameIndex);
1409     return;
1410   } else if (OpC == PPC::RESTORE_QUADWORD) {
1411     lowerQuadwordRestore(II, FrameIndex);
1412     return;
1413   }
1414 
1415   // Replace the FrameIndex with the base register, GPR1 (SP), or GPR31 (FP).
1416   MI.getOperand(FIOperandNum).ChangeToRegister(
1417     FrameIndex < 0 ? getBaseRegister(MF) : getFrameRegister(MF), false);
1418 
1419   // If the instruction is not present in ImmToIdxMap, then it has no immediate
1420   // form (and must be r+r).
1421   bool noImmForm = !MI.isInlineAsm() && OpC != TargetOpcode::STACKMAP &&
1422                    OpC != TargetOpcode::PATCHPOINT && !ImmToIdxMap.count(OpC);
1423 
1424   // Now add the frame object offset to the offset from r1.
1425   int Offset = MFI.getObjectOffset(FrameIndex);
1426   Offset += MI.getOperand(OffsetOperandNo).getImm();
1427 
1428   // If we're not using a Frame Pointer that has been set to the value of the
1429   // SP before having the stack size subtracted from it, then add the stack size
1430   // to Offset to get the correct offset.
1431   // Naked functions have stack size 0, although getStackSize may not reflect
1432   // that because we didn't call all the pieces that compute it for naked
1433   // functions.
1434   if (!MF.getFunction().hasFnAttribute(Attribute::Naked)) {
1435     if (!(hasBasePointer(MF) && FrameIndex < 0))
1436       Offset += MFI.getStackSize();
1437   }
1438 
1439   // If we encounter an LXVP/STXVP with an offset that doesn't fit, we can
1440   // transform it to the prefixed version so we don't have to use the XForm.
1441   if ((OpC == PPC::LXVP || OpC == PPC::STXVP) &&
1442       (!isInt<16>(Offset) || (Offset % offsetMinAlign(MI)) != 0) &&
1443       Subtarget.hasPrefixInstrs()) {
1444     unsigned NewOpc = OpC == PPC::LXVP ? PPC::PLXVP : PPC::PSTXVP;
1445     MI.setDesc(TII.get(NewOpc));
1446     OpC = NewOpc;
1447   }
1448 
1449   // If we can, encode the offset directly into the instruction.  If this is a
1450   // normal PPC "ri" instruction, any 16-bit value can be safely encoded.  If
1451   // this is a PPC64 "ix" instruction, only a 16-bit value with the low two bits
1452   // clear can be encoded.  This is extremely uncommon, because normally you
1453   // only "std" to a stack slot that is at least 4-byte aligned, but it can
1454   // happen in invalid code.
1455   assert(OpC != PPC::DBG_VALUE &&
1456          "This should be handled in a target-independent way");
1457   // FIXME: This should be factored out to a separate function as prefixed
1458   // instructions add a number of opcodes for which we can use 34-bit imm.
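  // SPE EVSTDD/EVLDD only accept a small unsigned offset (checked as isUInt<8>
  // here), while prefixed PLXVP/PSTXVP take a signed 34-bit displacement.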
1459   bool OffsetFitsMnemonic = (OpC == PPC::EVSTDD || OpC == PPC::EVLDD) ?
1460                             isUInt<8>(Offset) :
1461                             isInt<16>(Offset);
1462   if (OpC == PPC::PLXVP || OpC == PPC::PSTXVP)
1463     OffsetFitsMnemonic = isInt<34>(Offset);
1464   if (!noImmForm && ((OffsetFitsMnemonic &&
1465                       ((Offset % offsetMinAlign(MI)) == 0)) ||
1466                      OpC == TargetOpcode::STACKMAP ||
1467                      OpC == TargetOpcode::PATCHPOINT)) {
1468     MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
1469     return;
1470   }
1471 
1472   // The offset doesn't fit in the instruction's immediate field, so build it
1473   // in a register and use the indexed form of the instruction instead.
1474 
1475   bool is64Bit = TM.isPPC64();
1476   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
1477   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
1478   const TargetRegisterClass *RC = is64Bit ? G8RC : GPRC;
1479   Register SRegHi = MF.getRegInfo().createVirtualRegister(RC),
1480            SReg = MF.getRegInfo().createVirtualRegister(RC);
1481 
1482   // Materialize the full offset value in a register before the ld, st, or add.
1483   if (isInt<16>(Offset))
1484     BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI), SReg)
1485       .addImm(Offset);
1486   else {
1487     BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LIS8 : PPC::LIS), SRegHi)
1488       .addImm(Offset >> 16);
1489     BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::ORI8 : PPC::ORI), SReg)
1490       .addReg(SRegHi, RegState::Kill)
1491       .addImm(Offset);
1492   }
1493 
1494   // Convert into indexed form of the instruction:
1495   //
1496   //   sth 0:rA, 1:imm, 2:(rB) ==> sthx 0:rA, 2:rB, 1:r0
1497   //   addi 0:rA, 1:rB, 2:imm  ==> add  0:rA, 1:rB, 2:r0
1498   unsigned OperandBase;
1499 
1500   if (noImmForm)
1501     OperandBase = 1;
1502   else if (OpC != TargetOpcode::INLINEASM &&
1503            OpC != TargetOpcode::INLINEASM_BR) {
1504     assert(ImmToIdxMap.count(OpC) &&
1505            "No indexed form of load or store available!");
1506     unsigned NewOpcode = ImmToIdxMap.find(OpC)->second;
1507     MI.setDesc(TII.get(NewOpcode));
1508     OperandBase = 1;
1509   } else {
1510     OperandBase = OffsetOperandNo;
1511   }
1512 
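  // OperandBase indexes the base-register operand of the (possibly rewritten)
  // instruction; the register holding the offset goes immediately after it.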
1513   Register StackReg = MI.getOperand(FIOperandNum).getReg();
1514   MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
1515   MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);
1516 }
1517 
1518 Register PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
1519   const PPCFrameLowering *TFI = getFrameLowering(MF);
1520 
1521   if (!TM.isPPC64())
1522     return TFI->hasFP(MF) ? PPC::R31 : PPC::R1;
1523   else
1524     return TFI->hasFP(MF) ? PPC::X31 : PPC::X1;
1525 }
1526 
1527 Register PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
1528   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1529   if (!hasBasePointer(MF))
1530     return getFrameRegister(MF);
1531 
1532   if (TM.isPPC64())
1533     return PPC::X30;
1534 
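  // For 32-bit SVR4 PIC code R30 holds the PIC base, so use R29 instead.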
1535   if (Subtarget.isSVR4ABI() && TM.isPositionIndependent())
1536     return PPC::R29;
1537 
1538   return PPC::R30;
1539 }
1540 
1541 bool PPCRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
1542   if (!EnableBasePointer)
1543     return false;
1544   if (AlwaysBasePointer)
1545     return true;
1546 
1547   // If we need to realign the stack, then the stack pointer can no longer
1548   // serve as an offset into the caller's stack space. As a result, we need a
1549   // base pointer.
1550   return hasStackRealignment(MF);
1551 }
1552 
1553 /// Returns true if the instruction's frame index
1554 /// reference would be better served by a base register other than FP
1555 /// or SP. Used by LocalStackFrameAllocation to determine which frame index
1556 /// references it should create new base registers for.
1557 bool PPCRegisterInfo::
1558 needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
1559   assert(Offset < 0 && "Local offset must be negative");
1560 
1561   // It's the load/store FI references that cause issues, as it can be difficult
1562   // to materialize the offset if it won't fit in the literal field. Estimate
1563   // based on the size of the local frame and some conservative assumptions
1564   // about the rest of the stack frame (note, this is pre-regalloc, so
1565   // we don't know everything for certain yet) whether this offset is likely
1566   // to be out of range of the immediate. Return true if so.
1567 
1568   // We only generate virtual base registers for loads and stores that have
1569   // an r+i form. Return false for everything else.
1570   unsigned OpC = MI->getOpcode();
1571   if (!ImmToIdxMap.count(OpC))
1572     return false;
1573 
1574   // Don't generate a new virtual base register just to add zero to it.
1575   if ((OpC == PPC::ADDI || OpC == PPC::ADDI8) &&
1576       MI->getOperand(2).getImm() == 0)
1577     return false;
1578 
1579   MachineBasicBlock &MBB = *MI->getParent();
1580   MachineFunction &MF = *MBB.getParent();
1581   const PPCFrameLowering *TFI = getFrameLowering(MF);
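  // Get a conservative estimate of the frame size; the final layout is not
  // known this early.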
1582   unsigned StackEst = TFI->determineFrameLayout(MF, true);
1583 
1584   // If we likely don't need a stack frame, then we probably don't need a
1585   // virtual base register either.
1586   if (!StackEst)
1587     return false;
1588 
1589   // Estimate an offset from the stack pointer.
1590   // The incoming offset is relative to the SP at the start of the function,
1591   // but when we access the local it'll be relative to the SP after local
1592   // allocation, so adjust our SP-relative offset by that allocation size.
1593   Offset += StackEst;
1594 
1595   // The frame pointer will point to the end of the stack, so estimate the
1596   // offset as the difference between the object offset and the FP location.
1597   return !isFrameOffsetLegal(MI, getBaseRegister(MF), Offset);
1598 }
1599 
1600 /// Insert defining instruction(s) for BaseReg to
1601 /// be a pointer to FrameIdx at the beginning of the basic block.
1602 Register PPCRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
1603                                                        int FrameIdx,
1604                                                        int64_t Offset) const {
1605   unsigned ADDriOpc = TM.isPPC64() ? PPC::ADDI8 : PPC::ADDI;
1606 
1607   MachineBasicBlock::iterator Ins = MBB->begin();
1608   DebugLoc DL;                  // Defaults to "unknown"
1609   if (Ins != MBB->end())
1610     DL = Ins->getDebugLoc();
1611 
1612   const MachineFunction &MF = *MBB->getParent();
1613   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1614   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1615   const MCInstrDesc &MCID = TII.get(ADDriOpc);
1616   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1617   const TargetRegisterClass *RC = getPointerRegClass(MF);
1618   Register BaseReg = MRI.createVirtualRegister(RC);
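  // Constrain the new base register to whatever ADDI requires for its
  // destination operand.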
1619   MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));
1620 
1621   BuildMI(*MBB, Ins, DL, MCID, BaseReg)
1622     .addFrameIndex(FrameIdx).addImm(Offset);
1623 
1624   return BaseReg;
1625 }
1626 
1627 void PPCRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
1628                                         int64_t Offset) const {
1629   unsigned FIOperandNum = 0;
1630   while (!MI.getOperand(FIOperandNum).isFI()) {
1631     ++FIOperandNum;
1632     assert(FIOperandNum < MI.getNumOperands() &&
1633            "Instr doesn't have FrameIndex operand!");
1634   }
1635 
1636   MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
1637   unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
1638   Offset += MI.getOperand(OffsetOperandNo).getImm();
1639   MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
1640 
1641   MachineBasicBlock &MBB = *MI.getParent();
1642   MachineFunction &MF = *MBB.getParent();
1643   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1644   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1645   const MCInstrDesc &MCID = MI.getDesc();
1646   MachineRegisterInfo &MRI = MF.getRegInfo();
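  // The base register must also satisfy the register class the rewritten
  // instruction expects for its address operand.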
1647   MRI.constrainRegClass(BaseReg,
1648                         TII.getRegClass(MCID, FIOperandNum, this, MF));
1649 }
1650 
1651 bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
1652                                          Register BaseReg,
1653                                          int64_t Offset) const {
1654   unsigned FIOperandNum = 0;
1655   while (!MI->getOperand(FIOperandNum).isFI()) {
1656     ++FIOperandNum;
1657     assert(FIOperandNum < MI->getNumOperands() &&
1658            "Instr doesn't have FrameIndex operand!");
1659   }
1660 
1661   unsigned OffsetOperandNo = getOffsetONFromFION(*MI, FIOperandNum);
1662   Offset += MI->getOperand(OffsetOperandNo).getImm();
1663 
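  // STACKMAP and PATCHPOINT are not limited to a 16-bit displacement; other
  // opcodes need a suitably aligned signed 16-bit offset.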
1664   return MI->getOpcode() == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
1665          MI->getOpcode() == TargetOpcode::STACKMAP ||
1666          MI->getOpcode() == TargetOpcode::PATCHPOINT ||
1667          (isInt<16>(Offset) && (Offset % offsetMinAlign(*MI)) == 0);
1668 }
1669