1 //===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 10 #include "SystemZRegisterInfo.h" 11 #include "SystemZInstrInfo.h" 12 #include "SystemZSubtarget.h" 13 #include "llvm/CodeGen/LiveIntervalAnalysis.h" 14 #include "llvm/ADT/SmallSet.h" 15 #include "llvm/CodeGen/MachineInstrBuilder.h" 16 #include "llvm/CodeGen/MachineRegisterInfo.h" 17 #include "llvm/CodeGen/TargetFrameLowering.h" 18 #include "llvm/CodeGen/VirtRegMap.h" 19 20 using namespace llvm; 21 22 #define GET_REGINFO_TARGET_DESC 23 #include "SystemZGenRegisterInfo.inc" 24 25 SystemZRegisterInfo::SystemZRegisterInfo() 26 : SystemZGenRegisterInfo(SystemZ::R14D) {} 27 28 // Given that MO is a GRX32 operand, return either GR32 or GRH32 if MO 29 // somehow belongs in it. Otherwise, return GRX32. 
static const TargetRegisterClass *getRC32(MachineOperand &MO,
                                          const VirtRegMap *VRM,
                                          const MachineRegisterInfo *MRI) {
  const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());

  // If the virtual register is already constrained to one half, or the
  // operand's subregister index pins it to a half, answer that half.
  if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_l32)
    return &SystemZ::GR32BitRegClass;
  if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_h32)
    return &SystemZ::GRH32BitRegClass;

  // If the register has already been assigned a physical register, derive
  // the half from that assignment.
  if (VRM && VRM->hasPhys(MO.getReg())) {
    unsigned PhysReg = VRM->getPhys(MO.getReg());
    if (SystemZ::GR32BitRegClass.contains(PhysReg))
      return &SystemZ::GR32BitRegClass;
    assert (SystemZ::GRH32BitRegClass.contains(PhysReg) &&
            "Phys reg not in GR32 or GRH32?");
    return &SystemZ::GRH32BitRegClass;
  }

  // Still undecided: the operand remains in the combined GRX32 class.
  assert (RC == &SystemZ::GRX32BitRegClass);
  return RC;
}

// Provide register allocation hints for a GRX32 virtual register.  If the
// register is used by a LOCRMux whose other operand is already committed to
// the low (GR32) or high (GRH32) half, hint all allocatable registers of
// that half so the LOCRMux can become a single LOCR/LOCFHR instead of being
// expanded into a jump sequence.
bool
SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    // Walk the set of GRX32 registers connected to VirtReg through LOCRMux
    // operands, looking for one that is already pinned to a half.
    SmallVector<unsigned, 8> Worklist;
    SmallSet<unsigned, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (Worklist.size()) {
      unsigned Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->use_instructions(Reg))
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts.
        if (Use.getOpcode() == SystemZ::LOCRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
            TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                   getRC32(TrueMO, VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            // Both operands agree on a half: hint every allocatable,
            // non-reserved register of that half, in allocation order.
            for (MCPhysReg Reg : Order)
              if (RC->contains(Reg) && !MRI->isReserved(Reg))
                Hints.push_back(Reg);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling but since the alternative is
            // a jump sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          unsigned OtherReg =
            (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        }
    }
  }

  // No LOCRMux-driven hints; fall back to the generic implementation.
  return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
                                                   VRM, Matrix);
}

// Return the callee-saved register list, selecting the variant that also
// preserves the swifterror register when the function uses swifterror.
const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF->getFunction()->getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_SaveList;
  return CSR_SystemZ_SaveList;
}

// Return the call-preserved register mask, selecting the swifterror variant
// under the same conditions as getCalleeSavedRegs.
const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF.getFunction()->getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_RegMask;
  return CSR_SystemZ_RegMask;
}

// Mark registers the allocator must never use: the frame pointer (when one
// exists), the stack pointer, and the access registers holding the thread
// pointer -- together with every overlapping alias.
BitVector
SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF)) {
    // R11D is the frame pointer. Reserve all aliases.
    // (R10Q is the 128-bit register pair that overlaps R11.)
    Reserved.set(SystemZ::R11D);
    Reserved.set(SystemZ::R11L);
    Reserved.set(SystemZ::R11H);
    Reserved.set(SystemZ::R10Q);
  }

  // R15D is the stack pointer. Reserve all aliases.
  // (R14Q is the 128-bit register pair that overlaps R15.)
  Reserved.set(SystemZ::R15D);
  Reserved.set(SystemZ::R15L);
  Reserved.set(SystemZ::R15H);
  Reserved.set(SystemZ::R14Q);

  // A0 and A1 hold the thread pointer.
  Reserved.set(SystemZ::A0);
  Reserved.set(SystemZ::A1);

  return Reserved;
}

// Rewrite the frame-index operand of MI into a base register plus immediate
// offset.  If the final offset does not fit the instruction's addressing
// mode, synthesize an in-range anchor (via LA/LAY, an immediate load, or an
// index register) using a scratch ADDR64 virtual register.
void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII =
    static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;
  int64_t Offset = (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr) +
                    MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is ok for offset, use LDE32 instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  }
  else {
    // Create an anchor point that is in range. Start at 0xffff so that
    // can use LLILH to load the immediate.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      // Keep halving the mask until the residual offset is accepted by
      // some form of the instruction.
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

    unsigned ScratchReg =
      MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex
        && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode), ScratchReg)
          .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::AGR), ScratchReg)
          .addReg(ScratchReg, RegState::Kill).addReg(BasePtr);
      }

      // Use the scratch register as the base. It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Decide whether the register coalescer may merge the operands of a COPY.
// GR128 subreg copies are only coalesced when both live ranges are small,
// local to one basic block, and enough GR128 registers remain unclobbered in
// the region; otherwise the allocator could run out of 128-bit pairs.
bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert (MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything which is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
  // and local to one MBB with not too much interferring registers. Otherwise
  // regalloc may run out of registers.

  unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
  unsigned GR128Reg = MI->getOperand(WideOpNo).getReg();
  unsigned GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
  LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
  LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);

  // Check that the two virtual registers are local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  if (LIS.isLiveInToMBB(IntGR128, MBB) || LIS.isLiveOutOfMBB(IntGR128, MBB) ||
      LIS.isLiveInToMBB(IntGRNar, MBB) || LIS.isLiveOutOfMBB(IntGRNar, MBB))
    return false;

  // Find the first and last MIs of the registers.
  // If the wide register is the destination, its range begins first;
  // otherwise the narrow register's range does.
  MachineInstr *FirstMI = nullptr, *LastMI = nullptr;
  if (WideOpNo == 1) {
    FirstMI = LIS.getInstructionFromIndex(IntGR128.beginIndex());
    LastMI = LIS.getInstructionFromIndex(IntGRNar.endIndex());
  } else {
    FirstMI = LIS.getInstructionFromIndex(IntGRNar.beginIndex());
    LastMI = LIS.getInstructionFromIndex(IntGR128.endIndex());
  }
  assert (FirstMI && LastMI && "No instruction from index?");

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  MachineBasicBlock::iterator MII = FirstMI, MEE = LastMI;
  MEE++;
  for (; MII != MEE; ++MII) {
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && isPhysicalRegister(MO.getReg())) {
        // Record the GR128 super-register (if any) touched by this
        // physical-register operand.
        for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
             SI.isValid(); ++SI)
          if (NewRC->contains(*SI)) {
            PhysClobbered.set(*SI);
            break;
          }
      }
  }

  // Demand an arbitrary margin of free regs.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}

// Return the register used as the base for frame accesses: the frame
// pointer R11D when the function has one, otherwise the stack pointer R15D.
unsigned
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
}