//===-- SystemZRegisterInfo.cpp - SystemZ register information -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SystemZRegisterInfo.h"
#include "SystemZInstrInfo.h"
#include "SystemZSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/VirtRegMap.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "SystemZGenRegisterInfo.inc"

SystemZRegisterInfo::SystemZRegisterInfo()
    : SystemZGenRegisterInfo(SystemZ::R14D) {}

// Given that MO is a GRX32 operand, return GR32 or GRH32 if MO is already
// constrained to one of them by its register class, its subregister index,
// or an assigned physical register. Otherwise, return GRX32.
static const TargetRegisterClass *getRC32(MachineOperand &MO,
                                          const VirtRegMap *VRM,
                                          const MachineRegisterInfo *MRI) {
  const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());

  if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_l32 ||
      MO.getSubReg() == SystemZ::subreg_hl32)
    return &SystemZ::GR32BitRegClass;
  if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_h32 ||
      MO.getSubReg() == SystemZ::subreg_hh32)
    return &SystemZ::GRH32BitRegClass;

  if (VRM && VRM->hasPhys(MO.getReg())) {
    unsigned PhysReg = VRM->getPhys(MO.getReg());
    if (SystemZ::GR32BitRegClass.contains(PhysReg))
      return &SystemZ::GR32BitRegClass;
    assert(SystemZ::GRH32BitRegClass.contains(PhysReg) &&
           "Phys reg not in GR32 or GRH32?");
    return &SystemZ::GRH32BitRegClass;
  }

  assert(RC == &SystemZ::GRX32BitRegClass);
  return RC;
}

bool
SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    SmallVector<unsigned, 8> Worklist;
    SmallSet<unsigned, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (!Worklist.empty()) {
      unsigned Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->use_instructions(Reg))
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either the high
        // or the low parts.
        if (Use.getOpcode() == SystemZ::LOCRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
            TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                   getRC32(TrueMO, VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            // Pass the registers of RC as hints while making sure that if
            // any of these registers are copy hints, hint them first.
            SmallSet<unsigned, 4> CopyHints;
            CopyHints.insert(Hints.begin(), Hints.end());
            Hints.clear();
            for (MCPhysReg Reg : Order)
              if (CopyHints.count(Reg) &&
                  RC->contains(Reg) && !MRI->isReserved(Reg))
                Hints.push_back(Reg);
            for (MCPhysReg Reg : Order)
              if (!CopyHints.count(Reg) &&
                  RC->contains(Reg) && !MRI->isReserved(Reg))
                Hints.push_back(Reg);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling, but since the alternative is
            // a jump-sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          unsigned OtherReg =
            (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        }
    }
  }

  return BaseImplRetVal;
}

const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return Subtarget.hasVector() ? CSR_SystemZ_AllRegs_Vector_SaveList
                                 : CSR_SystemZ_AllRegs_SaveList;
  if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_SaveList;
  return CSR_SystemZ_SaveList;
}

const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  if (CC == CallingConv::AnyReg)
    return Subtarget.hasVector() ? CSR_SystemZ_AllRegs_Vector_RegMask
                                 : CSR_SystemZ_AllRegs_RegMask;
  if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_RegMask;
  return CSR_SystemZ_RegMask;
}

BitVector
SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF)) {
    // R11D is the frame pointer. Reserve all aliases.
    Reserved.set(SystemZ::R11D);
    Reserved.set(SystemZ::R11L);
    Reserved.set(SystemZ::R11H);
    Reserved.set(SystemZ::R10Q);
  }

  // R15D is the stack pointer. Reserve all aliases.
  Reserved.set(SystemZ::R15D);
  Reserved.set(SystemZ::R15L);
  Reserved.set(SystemZ::R15H);
  Reserved.set(SystemZ::R14Q);

  // A0 and A1 hold the thread pointer.
  Reserved.set(SystemZ::A0);
  Reserved.set(SystemZ::A1);

  return Reserved;
}

void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
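  // The base register is the function's frame register (see getFrameRegister
  // below), i.e. normally the frame pointer R11D when one is in use and the
  // stack pointer R15D otherwise.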
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;
  int64_t Offset = (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr) +
                    MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is OK for the offset, use LDE32 instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  }
  else {
    // Create an anchor point that is in range. Start at 0xffff so that we
    // can use LLILH to load the immediate.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

    unsigned ScratchReg =
        MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex
        && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode), ScratchReg)
            .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::AGR), ScratchReg)
            .addReg(ScratchReg, RegState::Kill).addReg(BasePtr);
      }

      // Use the scratch register as the base. It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert(MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything that is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
  // and local to one MBB without too many interfering registers. Otherwise,
  // regalloc may run out of registers.
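  // The check below requires both live ranges to begin and end in this MBB,
  // then scans the instructions they span and counts the GR128 registers
  // clobbered there; the coalesce is rejected unless a margin of
  // DemandedFreeGR128 registers remains unclobbered.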

  unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
  unsigned GR128Reg = MI->getOperand(WideOpNo).getReg();
  unsigned GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
  LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
  LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);

  // Check that the two virtual registers are local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *FirstMI_GR128 =
      LIS.getInstructionFromIndex(IntGR128.beginIndex());
  MachineInstr *FirstMI_GRNar =
      LIS.getInstructionFromIndex(IntGRNar.beginIndex());
  MachineInstr *LastMI_GR128 = LIS.getInstructionFromIndex(IntGR128.endIndex());
  MachineInstr *LastMI_GRNar = LIS.getInstructionFromIndex(IntGRNar.endIndex());
  if ((!FirstMI_GR128 || FirstMI_GR128->getParent() != MBB) ||
      (!FirstMI_GRNar || FirstMI_GRNar->getParent() != MBB) ||
      (!LastMI_GR128 || LastMI_GR128->getParent() != MBB) ||
      (!LastMI_GRNar || LastMI_GRNar->getParent() != MBB))
    return false;

  MachineBasicBlock::iterator MII = nullptr, MEE = nullptr;
  if (WideOpNo == 1) {
    MII = FirstMI_GR128;
    MEE = LastMI_GRNar;
  } else {
    MII = FirstMI_GRNar;
    MEE = LastMI_GR128;
  }

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  MEE++;
  for (; MII != MEE; ++MII) {
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && isPhysicalRegister(MO.getReg())) {
        for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
             SI.isValid(); ++SI)
          if (NewRC->contains(*SI)) {
            PhysClobbered.set(*SI);
            break;
          }
      }
  }

  // Demand an arbitrary margin of free regs.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}

unsigned
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
}

const TargetRegisterClass *
SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &SystemZ::CCRRegClass)
    return &SystemZ::GR32BitRegClass;
  return RC;
}