//===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SystemZRegisterInfo.h"
#include "SystemZInstrInfo.h"
#include "SystemZSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/VirtRegMap.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "SystemZGenRegisterInfo.inc"

SystemZRegisterInfo::SystemZRegisterInfo()
    : SystemZGenRegisterInfo(SystemZ::R14D) {}

// Given that MO is a GRX32 operand, return GR32 or GRH32 if MO can be shown
// to belong to one of them. Otherwise, return GRX32.
static const TargetRegisterClass *getRC32(MachineOperand &MO,
                                          const VirtRegMap *VRM,
                                          const MachineRegisterInfo *MRI) {
  const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());

  if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_l32 ||
      MO.getSubReg() == SystemZ::subreg_hl32)
    return &SystemZ::GR32BitRegClass;
  if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_h32 ||
      MO.getSubReg() == SystemZ::subreg_hh32)
    return &SystemZ::GRH32BitRegClass;

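  // If the virtual register has already been assigned a physical register,
  // derive the 32-bit class from that assignment.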
  if (VRM && VRM->hasPhys(MO.getReg())) {
    Register PhysReg = VRM->getPhys(MO.getReg());
    if (SystemZ::GR32BitRegClass.contains(PhysReg))
      return &SystemZ::GR32BitRegClass;
    assert(SystemZ::GRH32BitRegClass.contains(PhysReg) &&
           "Phys reg not in GR32 or GRH32?");
    return &SystemZ::GRH32BitRegClass;
  }

  assert(RC == &SystemZ::GRX32BitRegClass);
  return RC;
}

// Pass the registers of RC as hints, making sure that any of them that are
// also copy hints (and therefore already in Hints) are hinted first.
static void addHints(ArrayRef<MCPhysReg> Order,
                     SmallVectorImpl<MCPhysReg> &Hints,
                     const TargetRegisterClass *RC,
                     const MachineRegisterInfo *MRI) {
  SmallSet<unsigned, 4> CopyHints;
  CopyHints.insert(Hints.begin(), Hints.end());
  Hints.clear();
  for (MCPhysReg Reg : Order)
    if (CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
  for (MCPhysReg Reg : Order)
    if (!CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
}

bool SystemZRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

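  // Collect the generic allocation hints (e.g. copy hints recorded in MRI)
  // first; target-specific hints are added below.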
  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (VRM != nullptr) {
    // Add any two-address hints after any copy hints.
    SmallSet<unsigned, 4> TwoAddrHints;
    for (auto &Use : MRI->reg_nodbg_instructions(VirtReg))
      if (SystemZ::getTwoOperandOpcode(Use.getOpcode()) != -1) {
        const MachineOperand *VRRegMO = nullptr;
        const MachineOperand *OtherMO = nullptr;
        const MachineOperand *CommuMO = nullptr;
        if (VirtReg == Use.getOperand(0).getReg()) {
          VRRegMO = &Use.getOperand(0);
          OtherMO = &Use.getOperand(1);
          if (Use.isCommutable())
            CommuMO = &Use.getOperand(2);
        } else if (VirtReg == Use.getOperand(1).getReg()) {
          VRRegMO = &Use.getOperand(1);
          OtherMO = &Use.getOperand(0);
        } else if (VirtReg == Use.getOperand(2).getReg() &&
                   Use.isCommutable()) {
          VRRegMO = &Use.getOperand(2);
          OtherMO = &Use.getOperand(0);
        } else
          continue;

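        // If MO maps to a physical register, record that register (adjusted
        // for any sub-/super-register indices) as a two-address hint for
        // VirtReg, unless it is reserved or already present as a copy hint.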
        auto tryAddHint = [&](const MachineOperand *MO) -> void {
          Register Reg = MO->getReg();
          Register PhysReg = Register::isPhysicalRegister(Reg)
                                 ? Reg
                                 : Register(VRM->getPhys(Reg));
          if (PhysReg) {
            if (MO->getSubReg())
              PhysReg = getSubReg(PhysReg, MO->getSubReg());
            if (VRRegMO->getSubReg())
              PhysReg = getMatchingSuperReg(PhysReg, VRRegMO->getSubReg(),
                                            MRI->getRegClass(VirtReg));
            if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
              TwoAddrHints.insert(PhysReg);
          }
        };
        tryAddHint(OtherMO);
        if (CommuMO)
          tryAddHint(CommuMO);
      }
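    // Append the two-address hints, in allocation order, after the hints
    // already collected.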
    for (MCPhysReg OrderReg : Order)
      if (TwoAddrHints.count(OrderReg))
        Hints.push_back(OrderReg);
  }

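  // For a GRX32 virtual register, try to determine from its uses whether it
  // should live in the low (GR32) or high (GRH32) 32-bit half, and hint the
  // corresponding registers.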
  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    SmallVector<Register, 8> Worklist;
    SmallSet<Register, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (Worklist.size()) {
      Register Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->reg_instructions(Reg)) {
        // For LOCRMux, check whether the other operand is already a high or
        // low register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts. The same handling applies to SELRMux.
        if (Use.getOpcode() == SystemZ::LOCRMux ||
            Use.getOpcode() == SystemZ::SELRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
            TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                   getRC32(TrueMO, VRM, MRI));
          if (Use.getOpcode() == SystemZ::SELRMux)
            RC = TRI->getCommonSubClass(RC,
                                        getRC32(Use.getOperand(0), VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            addHints(Order, Hints, RC, MRI);
            // Return true to make these hints the only registers available to
            // RA. This may mean extra spilling, but since the alternative is
            // a jump-sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          Register OtherReg =
              (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        } // end LOCRMux
        else if (Use.getOpcode() == SystemZ::CHIMux ||
                 Use.getOpcode() == SystemZ::CFIMux) {
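          // For a compare with zero where the register is defined only by
          // 32-bit loads (LMux), prefer the low (GR32) half so that the load
          // and the compare can later be combined into a load-and-test.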
          if (Use.getOperand(1).getImm() == 0) {
            bool OnlyLMuxes = true;
            for (MachineInstr &DefMI : MRI->def_instructions(VirtReg))
              if (DefMI.getOpcode() != SystemZ::LMux)
                OnlyLMuxes = false;
            if (OnlyLMuxes) {
              addHints(Order, Hints, &SystemZ::GR32BitRegClass, MRI);
              // Return false to make these hints preferred but not obligatory.
              return false;
            }
          }
        } // end CHIMux / CFIMux
      }
    }
  }

  return BaseImplRetVal;
}

const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return Subtarget.hasVector() ? CSR_SystemZ_AllRegs_Vector_SaveList
                                 : CSR_SystemZ_AllRegs_SaveList;
  if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_SaveList;
  return CSR_SystemZ_SaveList;
}

const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  if (CC == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return Subtarget.hasVector() ? CSR_SystemZ_AllRegs_Vector_RegMask
                                 : CSR_SystemZ_AllRegs_RegMask;
  if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_RegMask;
  return CSR_SystemZ_RegMask;
}

BitVector
SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF)) {
    // R11D is the frame pointer.  Reserve all aliases.
    Reserved.set(SystemZ::R11D);
    Reserved.set(SystemZ::R11L);
    Reserved.set(SystemZ::R11H);
    Reserved.set(SystemZ::R10Q);
  }

  // R15D is the stack pointer.  Reserve all aliases.
  Reserved.set(SystemZ::R15D);
  Reserved.set(SystemZ::R15L);
  Reserved.set(SystemZ::R15H);
  Reserved.set(SystemZ::R14Q);

  // A0 and A1 hold the thread pointer.
  Reserved.set(SystemZ::A0);
  Reserved.set(SystemZ::A1);

  // FPC is the floating-point control register.
  Reserved.set(SystemZ::FPC);

  return Reserved;
}

void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  Register BasePtr;
  int64_t Offset = (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr) +
                    MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    MI->getDebugOffset().ChangeToImmediate(Offset);
    return;
  }

278 
279   // See if the offset is in range, or if an equivalent instruction that
280   // accepts the offset exists.
281   unsigned Opcode = MI->getOpcode();
282   unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
283   if (OpcodeForOffset) {
284     if (OpcodeForOffset == SystemZ::LE &&
285         MF.getSubtarget<SystemZSubtarget>().hasVector()) {
286       // If LE is ok for offset, use LDE instead on z13.
287       OpcodeForOffset = SystemZ::LDE32;
288     }
289     MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
290   }
291   else {
292     // Create an anchor point that is in range.  Start at 0xffff so that
293     // can use LLILH to load the immediate.
294     int64_t OldOffset = Offset;
295     int64_t Mask = 0xffff;
296     do {
297       Offset = OldOffset & Mask;
298       OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
299       Mask >>= 1;
300       assert(Mask && "One offset must be OK");
301     } while (!OpcodeForOffset);
302 
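    // The part of the offset that is out of range (HighOffset) is added via a
    // scratch register below, either as an index or as a new base.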
    Register ScratchReg =
        MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex &&
        MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode), ScratchReg)
          .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::LA), ScratchReg)
          .addReg(BasePtr, RegState::Kill).addImm(0).addReg(ScratchReg);
      }

      // Use the scratch register as the base.  It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert(MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything that is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
  // and local to one MBB without too many interfering registers. Otherwise
  // the register allocator may run out of registers.

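  // Identify the GR128 operand and the narrow operand of the COPY, and get
  // their live intervals.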
  unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
  Register GR128Reg = MI->getOperand(WideOpNo).getReg();
  Register GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
  LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
  LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);

  // Check that the two virtual registers are local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *FirstMI_GR128 =
    LIS.getInstructionFromIndex(IntGR128.beginIndex());
  MachineInstr *FirstMI_GRNar =
    LIS.getInstructionFromIndex(IntGRNar.beginIndex());
  MachineInstr *LastMI_GR128 = LIS.getInstructionFromIndex(IntGR128.endIndex());
  MachineInstr *LastMI_GRNar = LIS.getInstructionFromIndex(IntGRNar.endIndex());
  if ((!FirstMI_GR128 || FirstMI_GR128->getParent() != MBB) ||
      (!FirstMI_GRNar || FirstMI_GRNar->getParent() != MBB) ||
      (!LastMI_GR128 || LastMI_GR128->getParent() != MBB) ||
      (!LastMI_GRNar || LastMI_GRNar->getParent() != MBB))
    return false;

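  // Walk the region that the coalesced register would span: from the start of
  // the COPY source's live range to the end of the COPY destination's live
  // range.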
  MachineBasicBlock::iterator MII = nullptr, MEE = nullptr;
  if (WideOpNo == 1) {
    MII = FirstMI_GR128;
    MEE = LastMI_GRNar;
  } else {
    MII = FirstMI_GRNar;
    MEE = LastMI_GR128;
  }

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  MEE++;
  for (; MII != MEE; ++MII) {
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {
        for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
             SI.isValid(); ++SI)
          if (NewRC->contains(*SI)) {
            PhysClobbered.set(*SI);
            break;
          }
      }
  }

  // Demand an (arbitrary) margin of unclobbered GR128 registers.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}

Register
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
}

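// CC values cannot be copied directly between condition-code registers, so
// cross-class copies of CCRRegClass are performed through a GR32 register.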
const TargetRegisterClass *
SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &SystemZ::CCRRegClass)
    return &SystemZ::GR32BitRegClass;
  return RC;
}