//===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SystemZRegisterInfo.h"
#include "SystemZInstrInfo.h"
#include "SystemZSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "SystemZGenRegisterInfo.inc"

SystemZRegisterInfo::SystemZRegisterInfo()
    : SystemZGenRegisterInfo(SystemZ::R14D) {}

// Given that MO is a GRX32 operand, return GR32 or GRH32 if MO belongs in
// one of them (judging by its register class, subregister index, or assigned
// physical register). Otherwise, return GRX32.
static const TargetRegisterClass *getRC32(MachineOperand &MO,
                                          const VirtRegMap *VRM,
                                          const MachineRegisterInfo *MRI) {
  const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());

  if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_l32 ||
      MO.getSubReg() == SystemZ::subreg_hl32)
    return &SystemZ::GR32BitRegClass;
  if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_h32 ||
      MO.getSubReg() == SystemZ::subreg_hh32)
    return &SystemZ::GRH32BitRegClass;

  if (VRM && VRM->hasPhys(MO.getReg())) {
    Register PhysReg = VRM->getPhys(MO.getReg());
    if (SystemZ::GR32BitRegClass.contains(PhysReg))
      return &SystemZ::GR32BitRegClass;
    assert(SystemZ::GRH32BitRegClass.contains(PhysReg) &&
           "Phys reg not in GR32 or GRH32?");
    return &SystemZ::GRH32BitRegClass;
  }

  assert(RC == &SystemZ::GRX32BitRegClass);
  return RC;
}

// Pass the registers of RC as hints while making sure that if any of these
// registers are copy hints (and therefore already in Hints), hint them
// first.
static void addHints(ArrayRef<MCPhysReg> Order,
                     SmallVectorImpl<MCPhysReg> &Hints,
                     const TargetRegisterClass *RC,
                     const MachineRegisterInfo *MRI) {
  SmallSet<unsigned, 4> CopyHints;
  CopyHints.insert(Hints.begin(), Hints.end());
  Hints.clear();
  for (MCPhysReg Reg : Order)
    if (CopyHints.count(Reg) && RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
  for (MCPhysReg Reg : Order)
    if (!CopyHints.count(Reg) && RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
}

bool SystemZRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (VRM != nullptr) {
    // Add any two-address hints after any copy hints.
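    // A two-address hint steers VirtReg towards the physical register
    // already assigned to another operand of an instruction that also has a
    // shorter two-operand form. For example (an illustration, not tied to
    // any particular function): the three-operand ARK %dst, %lhs, %rhs can
    // later be shortened to the two-operand AR %dst, %rhs if %dst and %lhs
    // end up in the same physical register; getTwoOperandOpcode() below
    // identifies opcodes with such a form.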
    SmallSet<unsigned, 4> TwoAddrHints;
    for (auto &Use : MRI->reg_nodbg_instructions(VirtReg))
      if (SystemZ::getTwoOperandOpcode(Use.getOpcode()) != -1) {
        const MachineOperand *VRRegMO = nullptr;
        const MachineOperand *OtherMO = nullptr;
        const MachineOperand *CommuMO = nullptr;
        if (VirtReg == Use.getOperand(0).getReg()) {
          VRRegMO = &Use.getOperand(0);
          OtherMO = &Use.getOperand(1);
          if (Use.isCommutable())
            CommuMO = &Use.getOperand(2);
        } else if (VirtReg == Use.getOperand(1).getReg()) {
          VRRegMO = &Use.getOperand(1);
          OtherMO = &Use.getOperand(0);
        } else if (VirtReg == Use.getOperand(2).getReg() &&
                   Use.isCommutable()) {
          VRRegMO = &Use.getOperand(2);
          OtherMO = &Use.getOperand(0);
        } else
          continue;

        auto tryAddHint = [&](const MachineOperand *MO) -> void {
          Register Reg = MO->getReg();
          Register PhysReg = Register::isPhysicalRegister(Reg)
                                 ? Reg
                                 : Register(VRM->getPhys(Reg));
          if (PhysReg) {
            if (MO->getSubReg())
              PhysReg = getSubReg(PhysReg, MO->getSubReg());
            if (VRRegMO->getSubReg())
              PhysReg = getMatchingSuperReg(PhysReg, VRRegMO->getSubReg(),
                                            MRI->getRegClass(VirtReg));
            if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
              TwoAddrHints.insert(PhysReg);
          }
        };
        tryAddHint(OtherMO);
        if (CommuMO)
          tryAddHint(CommuMO);
      }
    for (MCPhysReg OrderReg : Order)
      if (TwoAddrHints.count(OrderReg))
        Hints.push_back(OrderReg);
  }

  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    SmallVector<Register, 8> Worklist;
    SmallSet<Register, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (Worklist.size()) {
      Register Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->reg_instructions(Reg)) {
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts. Same handling for SELRMux.
        if (Use.getOpcode() == SystemZ::LOCRMux ||
            Use.getOpcode() == SystemZ::SELRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
              TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                     getRC32(TrueMO, VRM, MRI));
          if (Use.getOpcode() == SystemZ::SELRMux)
            RC = TRI->getCommonSubClass(RC,
                                        getRC32(Use.getOperand(0), VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            addHints(Order, Hints, RC, MRI);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling but since the alternative is
            // a jump sequence expansion of the LOCRMux, it is preferred.
            return true;
          }
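          // (Background: after regalloc, LOCRMux is expanded to a single
          // LOCR when both 32-bit operands were assigned low parts, or to
          // a single LOCFHR when both were assigned high parts; a mixed
          // assignment forces the jump-sequence expansion mentioned above.)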
          // Add the other operand of the LOCRMux to the worklist.
          Register OtherReg =
              (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        } // end LOCRMux
        else if (Use.getOpcode() == SystemZ::CHIMux ||
                 Use.getOpcode() == SystemZ::CFIMux) {
          if (Use.getOperand(1).getImm() == 0) {
            bool OnlyLMuxes = true;
            for (MachineInstr &DefMI : MRI->def_instructions(VirtReg))
              if (DefMI.getOpcode() != SystemZ::LMux)
                OnlyLMuxes = false;
            if (OnlyLMuxes) {
              addHints(Order, Hints, &SystemZ::GR32BitRegClass, MRI);
              // Return false to make these hints preferred but not
              // obligatory.
              return false;
            }
          }
        } // end CHIMux / CFIMux
      }
    }
  }

  return BaseImplRetVal;
}

const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return Subtarget.hasVector() ? CSR_SystemZ_AllRegs_Vector_SaveList
                                 : CSR_SystemZ_AllRegs_SaveList;
  if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_SaveList;
  return CSR_SystemZ_ELF_SaveList;
}

const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  if (CC == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return Subtarget.hasVector() ? CSR_SystemZ_AllRegs_Vector_RegMask
                                 : CSR_SystemZ_AllRegs_RegMask;
  if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_RegMask;
  return CSR_SystemZ_ELF_RegMask;
}

BitVector
SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF)) {
    // R11D is the frame pointer. Reserve all aliases.
    Reserved.set(SystemZ::R11D);
    Reserved.set(SystemZ::R11L);
    Reserved.set(SystemZ::R11H);
    Reserved.set(SystemZ::R10Q);
  }

  // R15D is the stack pointer. Reserve all aliases.
  Reserved.set(SystemZ::R15D);
  Reserved.set(SystemZ::R15L);
  Reserved.set(SystemZ::R15H);
  Reserved.set(SystemZ::R14Q);

  // A0 and A1 hold the thread pointer.
  Reserved.set(SystemZ::A0);
  Reserved.set(SystemZ::A1);

  // FPC is the floating-point control register.
  Reserved.set(SystemZ::FPC);

  return Reserved;
}

void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
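  // (For instance, a spill slot accessed as FI plus an immediate of 8 ends
  // up addressed as BasePtr (R15D, or R11D when a frame pointer is used)
  // plus the slot's offset from that base plus 8.)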
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  Register BasePtr;
  int64_t Offset =
      (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed() +
       MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    if (MI->isNonListDebugValue()) {
      MI->getDebugOffset().ChangeToImmediate(Offset);
    } else {
      unsigned OpIdx = MI->getDebugOperandIndex(&MI->getOperand(FIOperandNum));
      SmallVector<uint64_t, 3> Ops;
      DIExpression::appendOffset(
          Ops, TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed());
      MI->getDebugExpressionOp().setMetadata(
          DIExpression::appendOpsToArg(MI->getDebugExpression(), Ops, OpIdx));
    }
    return;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is OK for the offset, use LDE32 instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  } else {
    // Create an anchor point that is in range. Start at 0xffff so that we
    // can use LLILH to load the immediate.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

    Register ScratchReg =
        MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;
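    // (Worked example, assuming an opcode whose displacement field is only
    // 12-bit unsigned: for OldOffset = 0x12345 the loop above settles on
    // Offset = 0x345 and HighOffset = 0x12000. The code below materializes
    // the anchor BasePtr + 0x12000 (or 0x12000 alone, used as an index)
    // in ScratchReg, and the rewritten instruction then addresses it with
    // the remaining displacement 0x345.)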
    if (MI->getDesc().TSFlags & SystemZII::HasIndex &&
        MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode), ScratchReg)
            .addReg(BasePtr)
            .addImm(HighOffset)
            .addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::LA), ScratchReg)
            .addReg(BasePtr, RegState::Kill)
            .addImm(0)
            .addReg(ScratchReg);
      }

      // Use the scratch register as the base. It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert(MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything which is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the live ranges are
  // small and local to one MBB with not too many interfering registers.
  // Otherwise regalloc may run out of registers.

  unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
  Register GR128Reg = MI->getOperand(WideOpNo).getReg();
  Register GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
  LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
  LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);

  // Check that the two virtual registers are local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *FirstMI_GR128 =
      LIS.getInstructionFromIndex(IntGR128.beginIndex());
  MachineInstr *FirstMI_GRNar =
      LIS.getInstructionFromIndex(IntGRNar.beginIndex());
  MachineInstr *LastMI_GR128 = LIS.getInstructionFromIndex(IntGR128.endIndex());
  MachineInstr *LastMI_GRNar = LIS.getInstructionFromIndex(IntGRNar.endIndex());
  if ((!FirstMI_GR128 || FirstMI_GR128->getParent() != MBB) ||
      (!FirstMI_GRNar || FirstMI_GRNar->getParent() != MBB) ||
      (!LastMI_GR128 || LastMI_GR128->getParent() != MBB) ||
      (!LastMI_GRNar || LastMI_GRNar->getParent() != MBB))
    return false;

  MachineBasicBlock::iterator MII = nullptr, MEE = nullptr;
  if (WideOpNo == 1) {
    MII = FirstMI_GR128;
    MEE = LastMI_GRNar;
  } else {
    MII = FirstMI_GRNar;
    MEE = LastMI_GR128;
  }

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  MEE++;
  for (; MII != MEE; ++MII) {
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {
        for (MCSuperRegIterator SI(MO.getReg(), this, /*IncludeSelf=*/true);
             SI.isValid(); ++SI)
          if (NewRC->contains(*SI)) {
            PhysClobbered.set(*SI);
            break;
          }
      }
  }

  // Demand an arbitrary margin of free regs.
  const unsigned DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}

Register
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
}

const TargetRegisterClass *
SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &SystemZ::CCRRegClass)
    return &SystemZ::GR32BitRegClass;
  return RC;
}