//===-- PPCRegisterInfo.cpp - PowerPC Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the PowerPC implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "PPCRegisterInfo.h"
#include "PPCFrameLowering.h"
#include "PPCInstrBuilder.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>

using namespace llvm;

#define DEBUG_TYPE "reginfo"

#define GET_REGINFO_TARGET_DESC
#include "PPCGenRegisterInfo.inc"

STATISTIC(InflateGPRC, "Number of gprc inputs for getLargestLegalClass");
STATISTIC(InflateGP8RC, "Number of g8rc inputs for getLargestLegalClass");

static cl::opt<bool>
EnableBasePointer("ppc-use-base-pointer", cl::Hidden, cl::init(true),
         cl::desc("Enable use of a base pointer for complex stack frames"));

static cl::opt<bool>
AlwaysBasePointer("ppc-always-use-base-pointer", cl::Hidden, cl::init(false),
         cl::desc("Force the use of a base pointer in every function"));

static cl::opt<bool>
EnableGPRToVecSpills("ppc-enable-gpr-to-vsr-spills", cl::Hidden, cl::init(false),
         cl::desc("Enable spills from gpr to vsr rather than stack"));

static cl::opt<bool>
StackPtrConst("ppc-stack-ptr-caller-preserved",
                cl::desc("Consider R1 caller preserved so stack saves of "
                         "caller preserved registers can be LICM candidates"),
                cl::init(true), cl::Hidden);

static cl::opt<unsigned>
MaxCRBitSpillDist("ppc-max-crbit-spill-dist",
                  cl::desc("Maximum search distance for definition of CR bit "
                           "spill on ppc"),
                  cl::Hidden, cl::init(100));

// Copies/moves of physical accumulators are expensive operations
// that should be avoided whenever possible. MMA instructions are
// meant to be used in performance-sensitive computational kernels.
// This option is provided, at least for the time being, to give the
// user a tool to detect this expensive operation and either rework
// their code or report a compiler bug if that turns out to be the
// cause.
#ifndef NDEBUG
static cl::opt<bool>
ReportAccMoves("ppc-report-acc-moves",
               cl::desc("Emit information about accumulator register spills "
                        "and copies"),
               cl::Hidden, cl::init(false));
#endif

static unsigned offsetMinAlignForOpcode(unsigned OpC);

PPCRegisterInfo::PPCRegisterInfo(const PPCTargetMachine &TM)
  : PPCGenRegisterInfo(TM.isPPC64() ? PPC::LR8 : PPC::LR,
                       TM.isPPC64() ? 0 : 1,
                       TM.isPPC64() ? 0 : 1),
    TM(TM) {
  ImmToIdxMap[PPC::LD]   = PPC::LDX;    ImmToIdxMap[PPC::STD]  = PPC::STDX;
  ImmToIdxMap[PPC::LBZ]  = PPC::LBZX;   ImmToIdxMap[PPC::STB]  = PPC::STBX;
  ImmToIdxMap[PPC::LHZ]  = PPC::LHZX;   ImmToIdxMap[PPC::LHA]  = PPC::LHAX;
  ImmToIdxMap[PPC::LWZ]  = PPC::LWZX;   ImmToIdxMap[PPC::LWA]  = PPC::LWAX;
  ImmToIdxMap[PPC::LFS]  = PPC::LFSX;   ImmToIdxMap[PPC::LFD]  = PPC::LFDX;
  ImmToIdxMap[PPC::STH]  = PPC::STHX;   ImmToIdxMap[PPC::STW]  = PPC::STWX;
  ImmToIdxMap[PPC::STFS] = PPC::STFSX;  ImmToIdxMap[PPC::STFD] = PPC::STFDX;
  ImmToIdxMap[PPC::ADDI] = PPC::ADD4;
  ImmToIdxMap[PPC::LWA_32] = PPC::LWAX_32;

  // 64-bit
  ImmToIdxMap[PPC::LHA8] = PPC::LHAX8; ImmToIdxMap[PPC::LBZ8] = PPC::LBZX8;
  ImmToIdxMap[PPC::LHZ8] = PPC::LHZX8; ImmToIdxMap[PPC::LWZ8] = PPC::LWZX8;
  ImmToIdxMap[PPC::STB8] = PPC::STBX8; ImmToIdxMap[PPC::STH8] = PPC::STHX8;
  ImmToIdxMap[PPC::STW8] = PPC::STWX8; ImmToIdxMap[PPC::STDU] = PPC::STDUX;
  ImmToIdxMap[PPC::ADDI8] = PPC::ADD8;

  // VSX
  ImmToIdxMap[PPC::DFLOADf32] = PPC::LXSSPX;
  ImmToIdxMap[PPC::DFLOADf64] = PPC::LXSDX;
  ImmToIdxMap[PPC::SPILLTOVSR_LD] = PPC::SPILLTOVSR_LDX;
  ImmToIdxMap[PPC::SPILLTOVSR_ST] = PPC::SPILLTOVSR_STX;
  ImmToIdxMap[PPC::DFSTOREf32] = PPC::STXSSPX;
  ImmToIdxMap[PPC::DFSTOREf64] = PPC::STXSDX;
  ImmToIdxMap[PPC::LXV] = PPC::LXVX;
  ImmToIdxMap[PPC::LXSD] = PPC::LXSDX;
  ImmToIdxMap[PPC::LXSSP] = PPC::LXSSPX;
  ImmToIdxMap[PPC::STXV] = PPC::STXVX;
  ImmToIdxMap[PPC::STXSD] = PPC::STXSDX;
  ImmToIdxMap[PPC::STXSSP] = PPC::STXSSPX;

  // SPE
  ImmToIdxMap[PPC::EVLDD] = PPC::EVLDDX;
  ImmToIdxMap[PPC::EVSTDD] = PPC::EVSTDDX;
  ImmToIdxMap[PPC::SPESTW] = PPC::SPESTWX;
  ImmToIdxMap[PPC::SPELWZ] = PPC::SPELWZX;
}

/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
const TargetRegisterClass *
PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                       const {
  // Note that PPCInstrInfo::FoldImmediate also directly uses this Kind value
  // when it checks for ZERO folding.
  if (Kind == 1) {
    if (TM.isPPC64())
      return &PPC::G8RC_NOX0RegClass;
    return &PPC::GPRC_NOR0RegClass;
  }

  if (TM.isPPC64())
    return &PPC::G8RCRegClass;
  return &PPC::GPRCRegClass;
}

const MCPhysReg*
PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg) {
    if (!TM.isPPC64() && Subtarget.isAIXABI())
      report_fatal_error("AnyReg unimplemented on 32-bit AIX.");
    if (Subtarget.hasVSX())
      return CSR_64_AllRegs_VSX_SaveList;
    if (Subtarget.hasAltivec())
      return CSR_64_AllRegs_Altivec_SaveList;
    return CSR_64_AllRegs_SaveList;
  }

  // On PPC64, we might need to save r2 (but only if it is not reserved).
  // We do not need to treat R2 as callee-saved when using PC-Relative calls
  // because any direct uses of R2 will cause it to be reserved. If the function
  // is a leaf or the only uses of R2 are implicit uses for calls, the calls
  // will use the @notoc relocation which will cause this function to set the
  // st_other bit to 1, thereby communicating to its caller that it arbitrarily
  // clobbers the TOC.
  bool SaveR2 = MF->getRegInfo().isAllocatable(PPC::X2) &&
                !Subtarget.isUsingPCRelativeCalls();

  // Cold calling convention CSRs.
  if (MF->getFunction().getCallingConv() == CallingConv::Cold) {
    if (Subtarget.isAIXABI())
      report_fatal_error("Cold calling unimplemented on AIX.");
    if (TM.isPPC64()) {
      if (Subtarget.hasAltivec())
        return SaveR2 ? CSR_SVR64_ColdCC_R2_Altivec_SaveList
                      : CSR_SVR64_ColdCC_Altivec_SaveList;
      return SaveR2 ? CSR_SVR64_ColdCC_R2_SaveList
                    : CSR_SVR64_ColdCC_SaveList;
    }
    // 32-bit targets.
    if (Subtarget.hasAltivec())
      return CSR_SVR32_ColdCC_Altivec_SaveList;
    else if (Subtarget.hasSPE())
      return CSR_SVR32_ColdCC_SPE_SaveList;
    return CSR_SVR32_ColdCC_SaveList;
  }
  // Standard calling convention CSRs.
  if (TM.isPPC64()) {
    if (Subtarget.hasAltivec() &&
        (!Subtarget.isAIXABI() || TM.getAIXExtendedAltivecABI())) {
      return SaveR2 ? CSR_PPC64_R2_Altivec_SaveList
                    : CSR_PPC64_Altivec_SaveList;
    }
    return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList;
  }
  // 32-bit targets.
  if (Subtarget.isAIXABI()) {
    if (Subtarget.hasAltivec())
      return TM.getAIXExtendedAltivecABI() ? CSR_AIX32_Altivec_SaveList
                                           : CSR_AIX32_SaveList;
    return CSR_AIX32_SaveList;
  }
  if (Subtarget.hasAltivec())
    return CSR_SVR432_Altivec_SaveList;
  else if (Subtarget.hasSPE())
    return CSR_SVR432_SPE_SaveList;
  return CSR_SVR432_SaveList;
}

const uint32_t *
PPCRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  if (CC == CallingConv::AnyReg) {
    if (Subtarget.hasVSX())
      return CSR_64_AllRegs_VSX_RegMask;
    if (Subtarget.hasAltivec())
      return CSR_64_AllRegs_Altivec_RegMask;
    return CSR_64_AllRegs_RegMask;
  }

  if (Subtarget.isAIXABI()) {
    return TM.isPPC64()
               ? ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
                      ? CSR_PPC64_Altivec_RegMask
                      : CSR_PPC64_RegMask)
               : ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
                      ? CSR_AIX32_Altivec_RegMask
                      : CSR_AIX32_RegMask);
  }

  if (CC == CallingConv::Cold) {
    return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_SVR64_ColdCC_Altivec_RegMask
                                                  : CSR_SVR64_ColdCC_RegMask)
                        : (Subtarget.hasAltivec() ? CSR_SVR32_ColdCC_Altivec_RegMask
                                                  : (Subtarget.hasSPE()
                                                         ? CSR_SVR32_ColdCC_SPE_RegMask
                                                         : CSR_SVR32_ColdCC_RegMask));
  }

  return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_PPC64_Altivec_RegMask
                                                : CSR_PPC64_RegMask)
                      : (Subtarget.hasAltivec()
                             ? CSR_SVR432_Altivec_RegMask
                             : (Subtarget.hasSPE() ? CSR_SVR432_SPE_RegMask
                                                   : CSR_SVR432_RegMask));
}

const uint32_t*
PPCRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
  for (unsigned PseudoReg : {PPC::ZERO, PPC::ZERO8, PPC::RM})
    Mask[PseudoReg / 32] &= ~(1u << (PseudoReg % 32));
}

BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const PPCFrameLowering *TFI = getFrameLowering(MF);

  // The ZERO register is not really a register, but the representation of r0
  // when used in instructions that treat r0 as the constant 0.
  markSuperRegs(Reserved, PPC::ZERO);

  // The FP register is also not really a register, but is the representation
  // of the frame pointer register used by ISD::FRAMEADDR.
  markSuperRegs(Reserved, PPC::FP);

  // The BP register is also not really a register, but is the representation
  // of the base pointer register used by setjmp.
  markSuperRegs(Reserved, PPC::BP);

  // The counter registers must be reserved so that counter-based loops can
  // be correctly formed (and the mtctr instructions are not DCE'd).
  markSuperRegs(Reserved, PPC::CTR);
  markSuperRegs(Reserved, PPC::CTR8);

  markSuperRegs(Reserved, PPC::R1);
  markSuperRegs(Reserved, PPC::LR);
  markSuperRegs(Reserved, PPC::LR8);
  markSuperRegs(Reserved, PPC::RM);

  markSuperRegs(Reserved, PPC::VRSAVE);

  // The SVR4 ABI reserves r2 and r13.
  if (Subtarget.isSVR4ABI()) {
    // We only reserve r2 if we need to use the TOC pointer. If we have no
    // explicit uses of the TOC pointer (meaning we're a leaf function with
    // no constant-pool loads, etc.) and we have no potential uses inside an
    // inline asm block, then we can treat r2 as an ordinary callee-saved
    // register.
    const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
    if (!TM.isPPC64() || FuncInfo->usesTOCBasePtr() || MF.hasInlineAsm())
      markSuperRegs(Reserved, PPC::R2);  // System-reserved register
    markSuperRegs(Reserved, PPC::R13); // Small Data Area pointer register
  }

  // Always reserve r2 on AIX for now.
  // TODO: Make r2 allocatable on AIX/XCOFF for some leaf functions.
  if (Subtarget.isAIXABI())
    markSuperRegs(Reserved, PPC::R2);  // System-reserved register

  // On PPC64, r13 is the thread pointer. Never allocate this register.
  if (TM.isPPC64())
    markSuperRegs(Reserved, PPC::R13);

  if (TFI->needsFP(MF))
    markSuperRegs(Reserved, PPC::R31);

  bool IsPositionIndependent = TM.isPositionIndependent();
  if (hasBasePointer(MF)) {
    if (Subtarget.is32BitELFABI() && IsPositionIndependent)
      markSuperRegs(Reserved, PPC::R29);
    else
      markSuperRegs(Reserved, PPC::R30);
  }

  if (Subtarget.is32BitELFABI() && IsPositionIndependent)
    markSuperRegs(Reserved, PPC::R30);

  // Reserve Altivec registers when Altivec is unavailable.
  if (!Subtarget.hasAltivec())
    for (TargetRegisterClass::iterator I = PPC::VRRCRegClass.begin(),
         IE = PPC::VRRCRegClass.end(); I != IE; ++I)
      markSuperRegs(Reserved, *I);

  if (Subtarget.isAIXABI() && Subtarget.hasAltivec() &&
      !TM.getAIXExtendedAltivecABI()) {
    // In the AIX default Altivec ABI, vector registers VR20-VR31 are reserved
    // and cannot be used.
    for (auto Reg : CSR_Altivec_SaveList) {
      if (Reg == 0)
        break;
      markSuperRegs(Reserved, Reg);
    }
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const PPCInstrInfo *InstrInfo = Subtarget.getInstrInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo();

  // If the callee saved info is invalid we have to default to true for safety.
  if (!MFI.isCalleeSavedInfoValid())
    return true;

  // We will require the use of X-Forms because the frame is larger than what
  // can be represented in signed 16 bits that fit in the immediate of a D-Form.
  // If we need an X-Form then we need a register to store the address offset.
  unsigned FrameSize = MFI.getStackSize();
  // Signed 16 bits means that the FrameSize cannot be more than 15 bits.
  if (FrameSize & ~0x7FFF)
    return true;

  // The callee saved info is valid so it can be traversed.
  // Checking for registers that need saving that do not have load or store
  // forms where the address offset is an immediate.
  for (unsigned i = 0; i < Info.size(); i++) {
    int FrIdx = Info[i].getFrameIdx();
    unsigned Reg = Info[i].getReg();

    const TargetRegisterClass *RC = getMinimalPhysRegClass(Reg);
    unsigned Opcode = InstrInfo->getStoreOpcodeForSpill(RC);
    if (!MFI.isFixedObjectIndex(FrIdx)) {
      // This is not a fixed object. If it requires alignment then we may still
      // need to use the XForm.
      if (offsetMinAlignForOpcode(Opcode) > 1)
        return true;
    }

    // This is either:
    // 1) A fixed frame index object which we know is aligned, so as long as we
    //    have a valid DForm/DSForm/DQForm (non XForm) we don't need to consider
    //    the alignment here.
    // 2) A not fixed object, but in that case we now know that the min required
    //    alignment is no more than 1 based on the previous check.
    if (InstrInfo->isXFormMemOp(Opcode))
      return true;
  }
  return false;
}

bool PPCRegisterInfo::isCallerPreservedPhysReg(MCRegister PhysReg,
                                               const MachineFunction &MF) const {
  assert(Register::isPhysicalRegister(PhysReg));
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!Subtarget.is64BitELFABI() && !Subtarget.isAIXABI())
    return false;
  if (PhysReg == Subtarget.getTOCPointerRegister())
    // X2/R2 is guaranteed to be preserved within a function if it is reserved.
    // The reason it's reserved is that it's the TOC pointer (and the function
    // uses the TOC). In functions where it isn't reserved (i.e. leaf functions
    // with no TOC access), we can't claim that it is preserved.
    return (getReservedRegs(MF).test(PhysReg));
  if (StackPtrConst && PhysReg == Subtarget.getStackPointerRegister() &&
      !MFI.hasVarSizedObjects() && !MFI.hasOpaqueSPAdjustment())
    // The value of the stack pointer does not change within a function after
    // the prologue and before the epilogue if there are no dynamic allocations
    // and no inline asm which clobbers X1/R1.
    return true;
  return false;
}

unsigned PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                              MachineFunction &MF) const {
  const PPCFrameLowering *TFI = getFrameLowering(MF);
  const unsigned DefaultSafety = 1;

  switch (RC->getID()) {
  default:
    return 0;
  case PPC::G8RC_NOX0RegClassID:
  case PPC::GPRC_NOR0RegClassID:
  case PPC::SPERCRegClassID:
  case PPC::G8RCRegClassID:
  case PPC::GPRCRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 32 - FP - DefaultSafety;
  }
  case PPC::F4RCRegClassID:
  case PPC::F8RCRegClassID:
  case PPC::VSLRCRegClassID:
    return 32 - DefaultSafety;
  case PPC::VFRCRegClassID:
  case PPC::VRRCRegClassID: {
    const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
    // Vector registers VR20-VR31 are reserved and cannot be used in the default
    // Altivec ABI on AIX.
    if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
      return 20 - DefaultSafety;
  }
    return 32 - DefaultSafety;
  case PPC::VSFRCRegClassID:
  case PPC::VSSRCRegClassID:
  case PPC::VSRCRegClassID: {
    const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
    if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
      // Vector registers VR20-VR31 are reserved and cannot be used in the
      // default Altivec ABI on AIX.
      return 52 - DefaultSafety;
  }
    return 64 - DefaultSafety;
  case PPC::CRRCRegClassID:
    return 8 - DefaultSafety;
  }
}

const TargetRegisterClass *
PPCRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  if (Subtarget.hasVSX()) {
    // With VSX, we can inflate various sub-register classes to the full VSX
    // register set.

    // For Power9 we allow the user to enable GPR to vector spills.
    // FIXME: Currently limited to spilling GP8RC. A follow on patch will add
    // support to spill GPRC.
    if (TM.isELFv2ABI() || Subtarget.isAIXABI()) {
      if (Subtarget.hasP9Vector() && EnableGPRToVecSpills &&
          RC == &PPC::G8RCRegClass) {
        InflateGP8RC++;
        return &PPC::SPILLTOVSRRCRegClass;
      }
      if (RC == &PPC::GPRCRegClass && EnableGPRToVecSpills)
        InflateGPRC++;
    }
    if (RC == &PPC::F8RCRegClass)
      return &PPC::VSFRCRegClass;
    else if (RC == &PPC::VRRCRegClass)
      return &PPC::VSRCRegClass;
    else if (RC == &PPC::F4RCRegClass && Subtarget.hasP8Vector())
      return &PPC::VSSRCRegClass;
  }

  return TargetRegisterInfo::getLargestLegalSuperClass(RC, MF);
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

/// lowerDynamicAlloc - Generate the code for allocating an object in the
/// current frame.  The sequence of code will be in the general form
///
///   addi   R0, SP, \#frameSize ; get the address of the previous frame
///   stwxu  R0, SP, Rnegsize    ; add and update the SP with the negated size
///   addi   Rnew, SP, \#maxCallFrameSize ; get the top of the allocation
///
void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {
  // Get the instruction.
  MachineInstr &MI = *II;
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  // Get the basic block's function.
  MachineFunction &MF = *MBB.getParent();
  // Get the frame info.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // Get the instruction info.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  // Determine whether 64-bit pointers are used.
  bool LP64 = TM.isPPC64();
  DebugLoc dl = MI.getDebugLoc();

  // Get the maximum call stack size.
  unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
  Align MaxAlign = MFI.getMaxAlign();
  assert(isAligned(MaxAlign, maxCallFrameSize) &&
         "Maximum call-frame size not sufficiently aligned");
  (void)MaxAlign;

  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  bool KillNegSizeReg = MI.getOperand(1).isKill();
  Register NegSizeReg = MI.getOperand(1).getReg();

  prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, Reg);
  // Grow the stack and update the stack pointer link, then determine the
  // address of new allocated space.
  if (LP64) {
    BuildMI(MBB, II, dl, TII.get(PPC::STDUX), PPC::X1)
        .addReg(Reg, RegState::Kill)
        .addReg(PPC::X1)
        .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
    BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
        .addReg(PPC::X1)
        .addImm(maxCallFrameSize);
  } else {
    BuildMI(MBB, II, dl, TII.get(PPC::STWUX), PPC::R1)
        .addReg(Reg, RegState::Kill)
        .addReg(PPC::R1)
        .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
    BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
        .addReg(PPC::R1)
        .addImm(maxCallFrameSize);
  }

  // Discard the DYNALLOC instruction.
  MBB.erase(II);
}

/// To accomplish dynamic stack allocation, we have to calculate the exact size
/// to be subtracted from the stack pointer according to the alignment
/// information, and get the previous frame pointer.
void PPCRegisterInfo::prepareDynamicAlloca(MachineBasicBlock::iterator II,
                                           Register &NegSizeReg,
                                           bool &KillNegSizeReg,
                                           Register &FramePointer) const {
  // Get the instruction.
  MachineInstr &MI = *II;
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  // Get the basic block's function.
  MachineFunction &MF = *MBB.getParent();
  // Get the frame info.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // Get the instruction info.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  // Determine whether 64-bit pointers are used.
  bool LP64 = TM.isPPC64();
  DebugLoc dl = MI.getDebugLoc();
  // Get the total frame size.
  unsigned FrameSize = MFI.getStackSize();

  // Get stack alignments.
  const PPCFrameLowering *TFI = getFrameLowering(MF);
  Align TargetAlign = TFI->getStackAlign();
  Align MaxAlign = MFI.getMaxAlign();

  // Determine the previous frame's address.  If FrameSize can't be
  // represented as 16 bits or we need special alignment, then we load the
  // previous frame's address from 0(SP).  Why not do an addis of the hi?
  // Because R0 is our only safe tmp register and addi/addis treat R0 as zero.
  // Constructing the constant and adding would take 3 instructions.
  // Fortunately, a frame greater than 32K is rare.
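  // In rough pseudo-assembly, the two alternatives emitted below are:
  //   addi   FramePointer, r31, frameSize  ; small frame, no over-aligned objects
  //   ld/lwz FramePointer, 0(r1)           ; otherwise, reload the back chain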
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  if (MaxAlign < TargetAlign && isInt<16>(FrameSize)) {
    if (LP64)
      BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), FramePointer)
          .addReg(PPC::X31)
          .addImm(FrameSize);
    else
      BuildMI(MBB, II, dl, TII.get(PPC::ADDI), FramePointer)
          .addReg(PPC::R31)
          .addImm(FrameSize);
  } else if (LP64) {
    BuildMI(MBB, II, dl, TII.get(PPC::LD), FramePointer)
        .addImm(0)
        .addReg(PPC::X1);
  } else {
    BuildMI(MBB, II, dl, TII.get(PPC::LWZ), FramePointer)
        .addImm(0)
        .addReg(PPC::R1);
  }
  // Determine the actual NegSizeReg according to alignment info.
  if (LP64) {
    if (MaxAlign > TargetAlign) {
      unsigned UnalNegSizeReg = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);

      // Unfortunately, there is no andi, only andi., and we can't insert that
      // here because we might clobber cr0 while it is live.
      BuildMI(MBB, II, dl, TII.get(PPC::LI8), NegSizeReg)
          .addImm(~(MaxAlign.value() - 1));

      unsigned NegSizeReg1 = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
      BuildMI(MBB, II, dl, TII.get(PPC::AND8), NegSizeReg)
          .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
          .addReg(NegSizeReg1, RegState::Kill);
      KillNegSizeReg = true;
    }
  } else {
    if (MaxAlign > TargetAlign) {
      unsigned UnalNegSizeReg = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);

      // Unfortunately, there is no andi, only andi., and we can't insert that
      // here because we might clobber cr0 while it is live.
      BuildMI(MBB, II, dl, TII.get(PPC::LI), NegSizeReg)
          .addImm(~(MaxAlign.value() - 1));

      unsigned NegSizeReg1 = NegSizeReg;
      NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
      BuildMI(MBB, II, dl, TII.get(PPC::AND), NegSizeReg)
          .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
          .addReg(NegSizeReg1, RegState::Kill);
      KillNegSizeReg = true;
    }
  }
}

void PPCRegisterInfo::lowerPrepareProbedAlloca(
    MachineBasicBlock::iterator II) const {
  MachineInstr &MI = *II;
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  // Get the basic block's function.
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // Get the instruction info.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  // Determine whether 64-bit pointers are used.
  bool LP64 = TM.isPPC64();
  DebugLoc dl = MI.getDebugLoc();
  Register FramePointer = MI.getOperand(0).getReg();
  const Register ActualNegSizeReg = MI.getOperand(1).getReg();
  bool KillNegSizeReg = MI.getOperand(2).isKill();
  Register NegSizeReg = MI.getOperand(2).getReg();
  const MCInstrDesc &CopyInst = TII.get(LP64 ? PPC::OR8 : PPC::OR);
  // The register allocator might allocate FramePointer and NegSizeReg to the
  // same physical register.
  if (FramePointer == NegSizeReg) {
    assert(KillNegSizeReg && "FramePointer is a def and NegSizeReg is a use, "
                             "NegSizeReg should be killed");
    // FramePointer is clobbered earlier than the use of NegSizeReg in
    // prepareDynamicAlloca, save NegSizeReg in ActualNegSizeReg to avoid
    // misuse.
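    // The copy below is a register move spelled as "or rD, rS, rS" (the
    // canonical mr), so the size value survives in ActualNegSizeReg.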
    BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
        .addReg(NegSizeReg)
        .addReg(NegSizeReg);
    NegSizeReg = ActualNegSizeReg;
    KillNegSizeReg = false;
  }
  prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, FramePointer);
  // NegSizeReg might be updated in prepareDynamicAlloca if MaxAlign >
  // TargetAlign.
  if (NegSizeReg != ActualNegSizeReg)
    BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
        .addReg(NegSizeReg)
        .addReg(NegSizeReg);
  MBB.erase(II);
}

void PPCRegisterInfo::lowerDynamicAreaOffset(
    MachineBasicBlock::iterator II) const {
  // Get the instruction.
  MachineInstr &MI = *II;
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  // Get the basic block's function.
  MachineFunction &MF = *MBB.getParent();
  // Get the frame info.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // Get the instruction info.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();

  unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
  bool is64Bit = TM.isPPC64();
  DebugLoc dl = MI.getDebugLoc();
  BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI),
          MI.getOperand(0).getReg())
      .addImm(maxCallFrameSize);
  MBB.erase(II);
}

/// lowerCRSpilling - Generate the code for spilling a CR register. Instead of
/// reserving a whole register (R0), we scrounge for one here. This generates
/// code like this:
///
///   mfcr rA                  ; Move the conditional register into GPR rA.
///   rlwinm rA, rA, SB, 0, 31 ; Shift the bits left so they are in CR0's slot.
///   stw rA, FI               ; Store rA to the frame.
///
void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
                                      unsigned FrameIndex) const {
  // Get the instruction.
  MachineInstr &MI = *II;       // ; SPILL_CR <SrcReg>, <offset>
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register SrcReg = MI.getOperand(0).getReg();

  // We need to store the CR in the low 4-bits of the saved value. First, issue
  // an MFOCRF to save all of the CRBits and, if needed, kill the SrcReg.
  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
      .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));

  // If the saved register wasn't CR0, shift the bits left so that they are in
  // CR0's slot.
  if (SrcReg != PPC::CR0) {
    Register Reg1 = Reg;
    Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);

    // rlwinm rA, rA, ShiftBits, 0, 31.
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
        .addReg(Reg1, RegState::Kill)
        .addImm(getEncodingValue(SrcReg) * 4)
        .addImm(0)
        .addImm(31);
  }

  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
                        .addReg(Reg, RegState::Kill),
                    FrameIndex);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
                                     unsigned FrameIndex) const {
  // Get the instruction.
  MachineInstr &MI = *II;       // ; <DestReg> = RESTORE_CR <offset>
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg) &&
         "RESTORE_CR does not define its destination");

  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
                            Reg), FrameIndex);

  // If the reloaded register isn't CR0, shift the bits right so that they are
  // in the right CR's slot.
  if (DestReg != PPC::CR0) {
    Register Reg1 = Reg;
    Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);

    unsigned ShiftBits = getEncodingValue(DestReg) * 4;
    // rlwinm r11, r11, 32-ShiftBits, 0, 31.
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
        .addReg(Reg1, RegState::Kill).addImm(32 - ShiftBits).addImm(0)
        .addImm(31);
  }

  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), DestReg)
      .addReg(Reg, RegState::Kill);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II,
                                         unsigned FrameIndex) const {
  // Get the instruction.
  MachineInstr &MI = *II;       // ; SPILL_CRBIT <SrcReg>, <offset>
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  DebugLoc dl = MI.getDebugLoc();

  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register SrcReg = MI.getOperand(0).getReg();

  // Search up the BB to find the definition of the CR bit.
  MachineBasicBlock::reverse_iterator Ins = MI;
  MachineBasicBlock::reverse_iterator Rend = MBB.rend();
  ++Ins;
  unsigned CRBitSpillDistance = 0;
  bool SeenUse = false;
  for (; Ins != Rend; ++Ins) {
    // Definition found.
    if (Ins->modifiesRegister(SrcReg, TRI))
      break;
    // Use found.
    if (Ins->readsRegister(SrcReg, TRI))
      SeenUse = true;
    // Unable to find CR bit definition within maximum search distance.
    if (CRBitSpillDistance == MaxCRBitSpillDist) {
      Ins = MI;
      break;
    }
    // Skip debug instructions when counting CR bit spill distance.
    if (!Ins->isDebugInstr())
      CRBitSpillDistance++;
  }

  // Unable to find the definition of the CR bit in the MBB.
  if (Ins == MBB.rend())
    Ins = MI;

  bool SpillsKnownBit = false;
  // There is no need to extract the CR bit if its value is already known.
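  // Roughly, the strategies used by the switch below are:
  //   defined by CRUNSET/CRSET       -> spill the known constant (li 0 / lis -32768)
  //   ISA 3.1 (Power10)              -> setnbc Reg, SrcBit
  //   ISA 3.0 (Power9), LT bits only -> setb   Reg, SrcField
  //   otherwise                      -> mfocrf Reg, SrcField; rlwinm to isolate
  //                                     the bit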
  switch (Ins->getOpcode()) {
  case PPC::CRUNSET:
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LI8 : PPC::LI), Reg)
        .addImm(0);
    SpillsKnownBit = true;
    break;
  case PPC::CRSET:
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LIS8 : PPC::LIS), Reg)
        .addImm(-32768);
    SpillsKnownBit = true;
    break;
  default:
    // On Power10, we can use SETNBC to spill all CR bits. SETNBC will set all
    // bits (specifically, it produces a -1 if the CR bit is set). Ultimately,
    // the bit that is of importance to us is bit 32 (bit 0 of a 32-bit
    // register), and SETNBC will set this.
    if (Subtarget.isISA3_1()) {
      BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETNBC8 : PPC::SETNBC), Reg)
          .addReg(SrcReg, RegState::Undef);
      break;
    }

    // On Power9, we can use SETB to extract the LT bit. This only works for
    // the LT bit since SETB produces -1/1/0 for LT/GT/<neither>. So the value
    // of the bit we care about (32-bit sign bit) will be set to the value of
    // the LT bit (regardless of the other bits in the CR field).
    if (Subtarget.isISA3_0()) {
      if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR1LT ||
          SrcReg == PPC::CR2LT || SrcReg == PPC::CR3LT ||
          SrcReg == PPC::CR4LT || SrcReg == PPC::CR5LT ||
          SrcReg == PPC::CR6LT || SrcReg == PPC::CR7LT) {
        BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETB8 : PPC::SETB), Reg)
            .addReg(getCRFromCRBit(SrcReg), RegState::Undef);
        break;
      }
    }

    // We need to move the CR field that contains the CR bit we are spilling.
    // The super register may not be explicitly defined (i.e. it can be defined
    // by a CR-logical that only defines the subreg) so we state that the CR
    // field is undef. Also, in order to preserve the kill flag on the CR bit,
    // we add it as an implicit use.
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
        .addReg(getCRFromCRBit(SrcReg), RegState::Undef)
        .addReg(SrcReg,
                RegState::Implicit | getKillRegState(MI.getOperand(0).isKill()));

    // If the saved register wasn't CR0LT, shift the bits left so that the bit
    // to store is the first one. Mask all but that bit.
    Register Reg1 = Reg;
    Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);

    // rlwinm rA, rA, ShiftBits, 0, 0.
    BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
        .addReg(Reg1, RegState::Kill)
        .addImm(getEncodingValue(SrcReg))
        .addImm(0).addImm(0);
  }
  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
                        .addReg(Reg, RegState::Kill),
                    FrameIndex);

  bool KillsCRBit = MI.killsRegister(SrcReg, TRI);
  // Discard the pseudo instruction.
  MBB.erase(II);
  if (SpillsKnownBit && KillsCRBit && !SeenUse) {
    Ins->setDesc(TII.get(PPC::UNENCODED_NOP));
    Ins->RemoveOperand(0);
  }
}

void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II,
                                        unsigned FrameIndex) const {
  // Get the instruction.
  MachineInstr &MI = *II;       // ; <DestReg> = RESTORE_CRBIT <offset>
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  bool LP64 = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg) &&
         "RESTORE_CRBIT does not define its destination");

  addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
                            Reg), FrameIndex);

  BuildMI(MBB, II, dl, TII.get(TargetOpcode::IMPLICIT_DEF), DestReg);

  Register RegO = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), RegO)
      .addReg(getCRFromCRBit(DestReg));

  unsigned ShiftBits = getEncodingValue(DestReg);
  // rlwimi r11, r10, 32-ShiftBits, ..., ...
  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWIMI8 : PPC::RLWIMI), RegO)
      .addReg(RegO, RegState::Kill)
      .addReg(Reg, RegState::Kill)
      .addImm(ShiftBits ? 32 - ShiftBits : 0)
      .addImm(ShiftBits)
      .addImm(ShiftBits);

  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF),
          getCRFromCRBit(DestReg))
      .addReg(RegO, RegState::Kill)
      // Make sure we have a use dependency all the way through this
      // sequence of instructions. We can't have the other bits in the CR
      // modified in between the mfocrf and the mtocrf.
      .addReg(getCRFromCRBit(DestReg), RegState::Implicit);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

void PPCRegisterInfo::emitAccCopyInfo(MachineBasicBlock &MBB,
                                      MCRegister DestReg, MCRegister SrcReg) {
#ifdef NDEBUG
  return;
#else
  if (ReportAccMoves) {
    std::string Dest = PPC::ACCRCRegClass.contains(DestReg) ? "acc" : "uacc";
    std::string Src = PPC::ACCRCRegClass.contains(SrcReg) ? "acc" : "uacc";
    dbgs() << "Emitting copy from " << Src << " to " << Dest << ":\n";
    MBB.dump();
  }
#endif
}

static void emitAccSpillRestoreInfo(MachineBasicBlock &MBB, bool IsPrimed,
                                    bool IsRestore) {
#ifdef NDEBUG
  return;
#else
  if (ReportAccMoves) {
    dbgs() << "Emitting " << (IsPrimed ? "acc" : "uacc") << " register "
           << (IsRestore ? "restore" : "spill") << ":\n";
    MBB.dump();
  }
#endif
}

/// lowerACCSpilling - Generate the code for spilling the accumulator register.
/// Similarly to other spills/reloads that use pseudo-ops, we do not actually
/// eliminate the FrameIndex here nor compute the stack offset. We simply
/// create a real instruction with an FI and rely on eliminateFrameIndex to
/// handle the FI elimination.
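///
/// For a primed accumulator on a little-endian subtarget the emitted sequence
/// is roughly:
///
///   xxmfacc ACCx              ; de-prime the accumulator
///   stxvp   VSRp(2x+0), FI+32
///   stxvp   VSRp(2x+1), FI+0
///   xxmtacc ACCx              ; re-prime, only if the source is not killed
///
/// (the two 32-byte halves swap offsets on big-endian targets).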
void PPCRegisterInfo::lowerACCSpilling(MachineBasicBlock::iterator II,
                                       unsigned FrameIndex) const {
  MachineInstr &MI = *II; // SPILL_ACC <SrcReg>, <offset>
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  Register SrcReg = MI.getOperand(0).getReg();
  bool IsKilled = MI.getOperand(0).isKill();

  bool IsPrimed = PPC::ACCRCRegClass.contains(SrcReg);
  Register Reg =
      PPC::VSRp0 + (SrcReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;
  bool IsLittleEndian = Subtarget.isLittleEndian();

  emitAccSpillRestoreInfo(MBB, IsPrimed, false);

  // De-prime the register being spilled, create two stores for the pair
  // subregisters accounting for endianness and then re-prime the register if
  // it isn't killed.  This uses the Offset parameter to addFrameReference() to
  // adjust the offset of the store that is within the 64-byte stack slot.
  if (IsPrimed)
    BuildMI(MBB, II, DL, TII.get(PPC::XXMFACC), SrcReg).addReg(SrcReg);
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
                        .addReg(Reg, getKillRegState(IsKilled)),
                    FrameIndex, IsLittleEndian ? 32 : 0);
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
                        .addReg(Reg + 1, getKillRegState(IsKilled)),
                    FrameIndex, IsLittleEndian ? 0 : 32);
  if (IsPrimed && !IsKilled)
    BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), SrcReg).addReg(SrcReg);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

/// lowerACCRestore - Generate the code to restore the accumulator register.
void PPCRegisterInfo::lowerACCRestore(MachineBasicBlock::iterator II,
                                      unsigned FrameIndex) const {
  MachineInstr &MI = *II; // <DestReg> = RESTORE_ACC <offset>
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg) &&
         "RESTORE_ACC does not define its destination");

  bool IsPrimed = PPC::ACCRCRegClass.contains(DestReg);
  Register Reg =
      PPC::VSRp0 + (DestReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;
  bool IsLittleEndian = Subtarget.isLittleEndian();

  emitAccSpillRestoreInfo(MBB, IsPrimed, true);

  // Create two loads for the pair subregisters accounting for endianness and
  // then prime the accumulator register being restored.
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), Reg),
                    FrameIndex, IsLittleEndian ? 32 : 0);
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), Reg + 1),
                    FrameIndex, IsLittleEndian ? 0 : 32);
  if (IsPrimed)
    BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), DestReg).addReg(DestReg);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

bool PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           Register Reg, int &FrameIdx) const {
  // For the nonvolatile condition registers (CR2, CR3, CR4) return true to
  // prevent allocating an additional frame slot.
  // For 64-bit ELF and AIX, the CR save area is in the linkage area at SP+8;
  // for 32-bit AIX the CR save area is in the linkage area at SP+4.
  // We have created a FrameIndex to that spill slot to keep the CalleeSavedInfo
  // valid.
  // For 32-bit ELF, we have previously created the stack slot if needed, so
  // return its FrameIdx.
  if (PPC::CR2 <= Reg && Reg <= PPC::CR4) {
    FrameIdx = MF.getInfo<PPCFunctionInfo>()->getCRSpillFrameIndex();
    return true;
  }
  return false;
}

// If the offset must be a multiple of some value, return what that value is.
static unsigned offsetMinAlignForOpcode(unsigned OpC) {
  switch (OpC) {
  default:
    return 1;
  case PPC::LWA:
  case PPC::LWA_32:
  case PPC::LD:
  case PPC::LDU:
  case PPC::STD:
  case PPC::STDU:
  case PPC::DFLOADf32:
  case PPC::DFLOADf64:
  case PPC::DFSTOREf32:
  case PPC::DFSTOREf64:
  case PPC::LXSD:
  case PPC::LXSSP:
  case PPC::STXSD:
  case PPC::STXSSP:
    return 4;
  case PPC::EVLDD:
  case PPC::EVSTDD:
    return 8;
  case PPC::LXV:
  case PPC::STXV:
    return 16;
  }
}

// If the offset must be a multiple of some value, return what that value is.
static unsigned offsetMinAlign(const MachineInstr &MI) {
  unsigned OpC = MI.getOpcode();
  return offsetMinAlignForOpcode(OpC);
}

// Return the OffsetOperandNo given the FIOperandNum (and the instruction).
static unsigned getOffsetONFromFION(const MachineInstr &MI,
                                    unsigned FIOperandNum) {
  // Take into account whether it's an add or mem instruction.
  unsigned OffsetOperandNo = (FIOperandNum == 2) ? 1 : 2;
  if (MI.isInlineAsm())
    OffsetOperandNo = FIOperandNum - 1;
  else if (MI.getOpcode() == TargetOpcode::STACKMAP ||
           MI.getOpcode() == TargetOpcode::PATCHPOINT)
    OffsetOperandNo = FIOperandNum + 1;

  return OffsetOperandNo;
}

void
PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  // Get the instruction.
  MachineInstr &MI = *II;
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  // Get the basic block's function.
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // Get the instruction info.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  // Get the frame info.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  DebugLoc dl = MI.getDebugLoc();

  unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);

  // Get the frame index.
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

  // Get the frame pointer save index.  Users of this index are primarily
  // DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();
  // Get the instruction opcode.
  unsigned OpC = MI.getOpcode();

  if ((OpC == PPC::DYNAREAOFFSET || OpC == PPC::DYNAREAOFFSET8)) {
    lowerDynamicAreaOffset(II);
    return;
  }

  // Special case for dynamic alloca.
  if (FPSI && FrameIndex == FPSI &&
      (OpC == PPC::DYNALLOC || OpC == PPC::DYNALLOC8)) {
    lowerDynamicAlloc(II);
    return;
  }

  if (FPSI && FrameIndex == FPSI &&
      (OpC == PPC::PREPARE_PROBED_ALLOCA_64 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_32 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32)) {
    lowerPrepareProbedAlloca(II);
    return;
  }

  // Special case for pseudo-ops SPILL_CR and RESTORE_CR, etc.
  if (OpC == PPC::SPILL_CR) {
    lowerCRSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_CR) {
    lowerCRRestore(II, FrameIndex);
    return;
  } else if (OpC == PPC::SPILL_CRBIT) {
    lowerCRBitSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_CRBIT) {
    lowerCRBitRestore(II, FrameIndex);
    return;
  } else if (OpC == PPC::SPILL_ACC || OpC == PPC::SPILL_UACC) {
    lowerACCSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_ACC || OpC == PPC::RESTORE_UACC) {
    lowerACCRestore(II, FrameIndex);
    return;
  }

  // Replace the FrameIndex with the base register, either GPR1 (SP) or
  // GPR31 (FP).
  MI.getOperand(FIOperandNum).ChangeToRegister(
      FrameIndex < 0 ? getBaseRegister(MF) : getFrameRegister(MF), false);

  // If the instruction is not present in ImmToIdxMap, then it has no immediate
  // form (and must be r+r).
  bool noImmForm = !MI.isInlineAsm() && OpC != TargetOpcode::STACKMAP &&
                   OpC != TargetOpcode::PATCHPOINT && !ImmToIdxMap.count(OpC);

  // Now add the frame object offset to the offset from r1.
  int Offset = MFI.getObjectOffset(FrameIndex);
  Offset += MI.getOperand(OffsetOperandNo).getImm();

  // If we're not using a Frame Pointer that has been set to the value of the
  // SP before having the stack size subtracted from it, then add the stack size
  // to Offset to get the correct offset.
  // Naked functions have stack size 0, although getStackSize may not reflect
  // that because we didn't call all the pieces that compute it for naked
  // functions.
  if (!MF.getFunction().hasFnAttribute(Attribute::Naked)) {
    if (!(hasBasePointer(MF) && FrameIndex < 0))
      Offset += MFI.getStackSize();
  }

  // If we can, encode the offset directly into the instruction.  If this is a
  // normal PPC "ri" instruction, any 16-bit value can be safely encoded.  If
  // this is a PPC64 "ix" instruction, only a 16-bit value with the low two bits
  // clear can be encoded.  This is extremely uncommon, because normally you
  // only "std" to a stack slot that is at least 4-byte aligned, but it can
  // happen in invalid code.
  assert(OpC != PPC::DBG_VALUE &&
         "This should be handled in a target-independent way");
  bool OffsetFitsMnemonic = (OpC == PPC::EVSTDD || OpC == PPC::EVLDD) ?
                            isUInt<8>(Offset) :
                            isInt<16>(Offset);
  if (!noImmForm && ((OffsetFitsMnemonic &&
                      ((Offset % offsetMinAlign(MI)) == 0)) ||
                     OpC == TargetOpcode::STACKMAP ||
                     OpC == TargetOpcode::PATCHPOINT)) {
    MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
    return;
  }

  // The offset doesn't fit in the instruction's immediate field, so scavenge
  // a register and build the offset in it.
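  // For example, an out-of-range offset of 0x12345 would be materialized
  // (roughly) as "lis rS, 1; ori rS, rS, 0x2345" before the access is
  // rewritten into its indexed (X-Form) equivalent below.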

  bool is64Bit = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  const TargetRegisterClass *RC = is64Bit ? G8RC : GPRC;
  Register SRegHi = MF.getRegInfo().createVirtualRegister(RC),
           SReg = MF.getRegInfo().createVirtualRegister(RC);

  // Insert a set of rA with the full offset value before the ld, st, or add
  if (isInt<16>(Offset))
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI), SReg)
        .addImm(Offset);
  else {
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LIS8 : PPC::LIS), SRegHi)
        .addImm(Offset >> 16);
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::ORI8 : PPC::ORI), SReg)
        .addReg(SRegHi, RegState::Kill)
        .addImm(Offset);
  }

  // Convert into indexed form of the instruction:
  //
  //   sth 0:rA, 1:imm 2:(rB) ==> sthx 0:rA, 2:rB, 1:r0
  //   addi 0:rA 1:rB, 2, imm ==> add 0:rA, 1:rB, 2:r0
  unsigned OperandBase;

  if (noImmForm)
    OperandBase = 1;
  else if (OpC != TargetOpcode::INLINEASM &&
           OpC != TargetOpcode::INLINEASM_BR) {
    assert(ImmToIdxMap.count(OpC) &&
           "No indexed form of load or store available!");
    unsigned NewOpcode = ImmToIdxMap.find(OpC)->second;
    MI.setDesc(TII.get(NewOpcode));
    OperandBase = 1;
  } else {
    OperandBase = OffsetOperandNo;
  }

  Register StackReg = MI.getOperand(FIOperandNum).getReg();
  MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
  MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);
}

Register PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const PPCFrameLowering *TFI = getFrameLowering(MF);

  if (!TM.isPPC64())
    return TFI->hasFP(MF) ? PPC::R31 : PPC::R1;
  else
    return TFI->hasFP(MF) ? PPC::X31 : PPC::X1;
}

Register PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  if (!hasBasePointer(MF))
    return getFrameRegister(MF);

  if (TM.isPPC64())
    return PPC::X30;

  if (Subtarget.isSVR4ABI() && TM.isPositionIndependent())
    return PPC::R29;

  return PPC::R30;
}

bool PPCRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  if (!EnableBasePointer)
    return false;
  if (AlwaysBasePointer)
    return true;

  // If we need to realign the stack, then the stack pointer can no longer
  // serve as an offset into the caller's stack space. As a result, we need a
  // base pointer.
  return hasStackRealignment(MF);
}

/// Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP.
/// Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool PPCRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  assert(Offset < 0 && "Local offset must be negative");

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate, based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet), whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores that have
  // an r+i form. Return false for everything else.
  unsigned OpC = MI->getOpcode();
  if (!ImmToIdxMap.count(OpC))
    return false;

  // Don't generate a new virtual base register just to add zero to it.
  if ((OpC == PPC::ADDI || OpC == PPC::ADDI8) &&
      MI->getOperand(2).getImm() == 0)
    return false;

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCFrameLowering *TFI = getFrameLowering(MF);
  unsigned StackEst = TFI->determineFrameLayout(MF, true);

  // If we likely don't need a stack frame, then we probably don't need a
  // virtual base register either.
  if (!StackEst)
    return false;

  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += StackEst;

  // The frame pointer will point to the end of the stack, so estimate the
  // offset as the difference between the object offset and the FP location.
  return !isFrameOffsetLegal(MI, getBaseRegister(MF), Offset);
}

/// Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
Register PPCRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                       int FrameIdx,
                                                       int64_t Offset) const {
  unsigned ADDriOpc = TM.isPPC64() ? PPC::ADDI8 : PPC::ADDI;

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetRegisterClass *RC = getPointerRegClass(MF);
  Register BaseReg = MRI.createVirtualRegister(RC);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  return BaseReg;
}

void PPCRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                        int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    ++FIOperandNum;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
  Offset += MI.getOperand(OffsetOperandNo).getImm();
  MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  const MCInstrDesc &MCID = MI.getDesc();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.constrainRegClass(BaseReg,
                        TII.getRegClass(MCID, FIOperandNum, this, MF));
}

bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                         Register BaseReg,
                                         int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    ++FIOperandNum;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  unsigned OffsetOperandNo = getOffsetONFromFION(*MI, FIOperandNum);
  Offset += MI->getOperand(OffsetOperandNo).getImm();

  return MI->getOpcode() == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
         MI->getOpcode() == TargetOpcode::STACKMAP ||
         MI->getOpcode() == TargetOpcode::PATCHPOINT ||
         (isInt<16>(Offset) && (Offset % offsetMinAlign(*MI)) == 0);
}