//===-- FixupStatepointCallerSaved.cpp - Fixup caller saved registers ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// A statepoint instruction contains, in its deopt parameters, values which
/// are meaningful to the runtime and must be readable at the moment the call
/// returns. In other words, we need to encode the fact that these values are
/// "late read" by the runtime. If we could express this notion to the
/// register allocator, it would produce the right form for us.
/// The need for a fixup (i.e. this pass) specifically handles the fact that
/// we cannot describe such a late read to the register allocator.
/// The register allocator may put a value in a register clobbered by the
/// call. This pass forces the spill of such registers and replaces the
/// corresponding statepoint operands with the added spill slots.
20 /// 21 //===----------------------------------------------------------------------===// 22 23 #include "llvm/ADT/SmallSet.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/CodeGen/MachineFrameInfo.h" 26 #include "llvm/CodeGen/MachineFunctionPass.h" 27 #include "llvm/CodeGen/StackMaps.h" 28 #include "llvm/CodeGen/TargetInstrInfo.h" 29 #include "llvm/IR/Statepoint.h" 30 #include "llvm/InitializePasses.h" 31 #include "llvm/Support/Debug.h" 32 33 using namespace llvm; 34 35 #define DEBUG_TYPE "fixup-statepoint-caller-saved" 36 STATISTIC(NumSpilledRegisters, "Number of spilled register"); 37 STATISTIC(NumSpillSlotsAllocated, "Number of spill slots allocated"); 38 STATISTIC(NumSpillSlotsExtended, "Number of spill slots extended"); 39 40 static cl::opt<bool> FixupSCSExtendSlotSize( 41 "fixup-scs-extend-slot-size", cl::Hidden, cl::init(false), 42 cl::desc("Allow spill in spill slot of greater size than register size"), 43 cl::Hidden); 44 45 static cl::opt<bool> PassGCPtrInCSR( 46 "fixup-allow-gcptr-in-csr", cl::Hidden, cl::init(false), 47 cl::desc("Allow passing GC Pointer arguments in callee saved registers")); 48 49 static cl::opt<bool> EnableCopyProp( 50 "fixup-scs-enable-copy-propagation", cl::Hidden, cl::init(true), 51 cl::desc("Enable simple copy propagation during register reloading")); 52 53 // This is purely debugging option. 54 // It may be handy for investigating statepoint spilling issues. 
// Debugging aid: once this many statepoints have been processed, stop
// allowing GC pointers in callee-saved registers for the remainder of the
// function (see runOnMachineFunction).
static cl::opt<unsigned> MaxStatepointsWithRegs(
    "fixup-max-csr-statepoints", cl::Hidden,
    cl::desc("Max number of statepoints allowed to pass GC Ptrs in registers"));

namespace {

/// Legacy-PM machine function pass that spills statepoint operands living in
/// caller-saved registers and rewrites the statepoint to reference the spill
/// slots instead. All real work happens in runOnMachineFunction (defined at
/// the bottom of this file).
class FixupStatepointCallerSaved : public MachineFunctionPass {
public:
  static char ID;

  FixupStatepointCallerSaved() : MachineFunctionPass(ID) {
    initializeFixupStatepointCallerSavedPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Only instructions are rewritten; the block structure is untouched.
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override {
    return "Fixup Statepoint Caller Saved";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // End anonymous namespace.

char FixupStatepointCallerSaved::ID = 0;
char &llvm::FixupStatepointCallerSavedID = FixupStatepointCallerSaved::ID;

INITIALIZE_PASS_BEGIN(FixupStatepointCallerSaved, DEBUG_TYPE,
                      "Fixup Statepoint Caller Saved", false, false)
INITIALIZE_PASS_END(FixupStatepointCallerSaved, DEBUG_TYPE,
                    "Fixup Statepoint Caller Saved", false, false)

// Utility function to get the size (in bytes) of the spill slot required for
// Reg, derived from the minimal physical register class containing it.
static unsigned getRegisterSize(const TargetRegisterInfo &TRI, Register Reg) {
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);
  return TRI.getSpillSize(*RC);
}

// Try to eliminate a redundant copy to the register we're going to
// spill, i.e. try to change:
//   X = COPY Y
//   SPILL X
// to
//   SPILL Y
// If there are no uses of X between the COPY and the STATEPOINT, that COPY
// may be eliminated.
// Reg    - register we're about to spill
// RI     - on entry, points to the statepoint;
//          on successful copy propagation, set to the new spill point.
// IsKill - set to true if the COPY is a kill (there are no later uses of Y)
// Returns either the found copy-source register or the original one.
static Register performCopyPropagation(Register Reg,
                                       MachineBasicBlock::iterator &RI,
                                       bool &IsKill, const TargetInstrInfo &TII,
                                       const TargetRegisterInfo &TRI) {
  // First check if the statepoint itself uses Reg in non-meta operands
  // (i.e. before the deopt-argument section). If so, the register is live
  // into the call and we must neither kill it nor move the spill point.
  int Idx = RI->findRegisterUseOperandIdx(Reg, false, &TRI);
  if (Idx >= 0 && (unsigned)Idx < StatepointOpers(&*RI).getNumDeoptArgsIdx()) {
    IsKill = false;
    return Reg;
  }

  if (!EnableCopyProp)
    return Reg;

  // Walk backwards from the statepoint looking for the instruction that
  // defines Reg, remembering the first (closest) intermediate reader.
  MachineBasicBlock *MBB = RI->getParent();
  MachineBasicBlock::reverse_iterator E = MBB->rend();
  MachineInstr *Def = nullptr, *Use = nullptr;
  for (auto It = ++(RI.getReverse()); It != E; ++It) {
    if (It->readsRegister(Reg, &TRI) && !Use)
      Use = &*It;
    if (It->modifiesRegister(Reg, &TRI)) {
      Def = &*It;
      break;
    }
  }

  // Def may be live-in to the block; nothing to propagate then.
  if (!Def)
    return Reg;

  // Only a plain full-register copy into Reg can be forwarded.
  auto DestSrc = TII.isCopyInstr(*Def);
  if (!DestSrc || DestSrc->Destination->getReg() != Reg)
    return Reg;

  Register SrcReg = DestSrc->Source->getReg();

  // Bail out if spilling SrcReg would need a differently-sized slot.
  if (getRegisterSize(TRI, Reg) != getRegisterSize(TRI, SrcReg))
    return Reg;

  LLVM_DEBUG(dbgs() << "spillRegisters: perform copy propagation "
                    << printReg(Reg, &TRI) << " -> " << printReg(SrcReg, &TRI)
                    << "\n");

  // Insert spill immediately after Def.
  RI = ++MachineBasicBlock::iterator(Def);
  IsKill = DestSrc->Source->isKill();

  // There are no uses of the original register between COPY and STATEPOINT.
  // There can't be any after STATEPOINT, so we can eliminate Def.
  if (!Use) {
    LLVM_DEBUG(dbgs() << "spillRegisters: removing dead copy " << *Def);
    Def->eraseFromParent();
  }
  return SrcReg;
}

namespace {
// Pair {Register, FrameIndex}
using RegSlotPair = std::pair<Register, int>;

// Keeps track of what reloads were inserted in MBB.
170 class RegReloadCache { 171 using ReloadSet = SmallSet<RegSlotPair, 8>; 172 DenseMap<const MachineBasicBlock *, ReloadSet> Reloads; 173 174 public: 175 RegReloadCache() = default; 176 177 // Record reload of Reg from FI in block MBB 178 void recordReload(Register Reg, int FI, const MachineBasicBlock *MBB) { 179 RegSlotPair RSP(Reg, FI); 180 auto Res = Reloads[MBB].insert(RSP); 181 (void)Res; 182 assert(Res.second && "reload already exists"); 183 } 184 185 // Does basic block MBB contains reload of Reg from FI? 186 bool hasReload(Register Reg, int FI, const MachineBasicBlock *MBB) { 187 RegSlotPair RSP(Reg, FI); 188 return Reloads.count(MBB) && Reloads[MBB].count(RSP); 189 } 190 }; 191 192 // Cache used frame indexes during statepoint re-write to re-use them in 193 // processing next statepoint instruction. 194 // Two strategies. One is to preserve the size of spill slot while another one 195 // extends the size of spill slots to reduce the number of them, causing 196 // the less total frame size. But unspill will have "implicit" any extend. 197 class FrameIndexesCache { 198 private: 199 struct FrameIndexesPerSize { 200 // List of used frame indexes during processing previous statepoints. 201 SmallVector<int, 8> Slots; 202 // Current index of un-used yet frame index. 203 unsigned Index = 0; 204 }; 205 MachineFrameInfo &MFI; 206 const TargetRegisterInfo &TRI; 207 // Map size to list of frame indexes of this size. If the mode is 208 // FixupSCSExtendSlotSize then the key 0 is used to keep all frame indexes. 209 // If the size of required spill slot is greater than in a cache then the 210 // size will be increased. 211 DenseMap<unsigned, FrameIndexesPerSize> Cache; 212 213 // Keeps track of slots reserved for the shared landing pad processing. 214 // Initialized from GlobalIndices for the current EHPad. 215 SmallSet<int, 8> ReservedSlots; 216 217 // Landing pad can be destination of several statepoints. 
Every register 218 // defined by such statepoints must be spilled to the same stack slot. 219 // This map keeps that information. 220 DenseMap<const MachineBasicBlock *, SmallVector<RegSlotPair, 8>> 221 GlobalIndices; 222 223 FrameIndexesPerSize &getCacheBucket(unsigned Size) { 224 // In FixupSCSExtendSlotSize mode the bucket with 0 index is used 225 // for all sizes. 226 return Cache[FixupSCSExtendSlotSize ? 0 : Size]; 227 } 228 229 public: 230 FrameIndexesCache(MachineFrameInfo &MFI, const TargetRegisterInfo &TRI) 231 : MFI(MFI), TRI(TRI) {} 232 // Reset the current state of used frame indexes. After invocation of 233 // this function all frame indexes are available for allocation with 234 // the exception of slots reserved for landing pad processing (if any). 235 void reset(const MachineBasicBlock *EHPad) { 236 for (auto &It : Cache) 237 It.second.Index = 0; 238 239 ReservedSlots.clear(); 240 if (EHPad && GlobalIndices.count(EHPad)) 241 for (auto &RSP : GlobalIndices[EHPad]) 242 ReservedSlots.insert(RSP.second); 243 } 244 245 // Get frame index to spill the register. 246 int getFrameIndex(Register Reg, MachineBasicBlock *EHPad) { 247 // Check if slot for Reg is already reserved at EHPad. 
248 auto It = GlobalIndices.find(EHPad); 249 if (It != GlobalIndices.end()) { 250 auto &Vec = It->second; 251 auto Idx = llvm::find_if( 252 Vec, [Reg](RegSlotPair &RSP) { return Reg == RSP.first; }); 253 if (Idx != Vec.end()) { 254 int FI = Idx->second; 255 LLVM_DEBUG(dbgs() << "Found global FI " << FI << " for register " 256 << printReg(Reg, &TRI) << " at " 257 << printMBBReference(*EHPad) << "\n"); 258 assert(ReservedSlots.count(FI) && "using unreserved slot"); 259 return FI; 260 } 261 } 262 263 unsigned Size = getRegisterSize(TRI, Reg); 264 FrameIndexesPerSize &Line = getCacheBucket(Size); 265 while (Line.Index < Line.Slots.size()) { 266 int FI = Line.Slots[Line.Index++]; 267 if (ReservedSlots.count(FI)) 268 continue; 269 // If all sizes are kept together we probably need to extend the 270 // spill slot size. 271 if (MFI.getObjectSize(FI) < Size) { 272 MFI.setObjectSize(FI, Size); 273 MFI.setObjectAlignment(FI, Align(Size)); 274 NumSpillSlotsExtended++; 275 } 276 return FI; 277 } 278 int FI = MFI.CreateSpillStackObject(Size, Align(Size)); 279 NumSpillSlotsAllocated++; 280 Line.Slots.push_back(FI); 281 ++Line.Index; 282 283 // Remember assignment {Reg, FI} for EHPad 284 if (EHPad) { 285 GlobalIndices[EHPad].push_back(std::make_pair(Reg, FI)); 286 LLVM_DEBUG(dbgs() << "Reserved FI " << FI << " for spilling reg " 287 << printReg(Reg, &TRI) << " at landing pad " 288 << printMBBReference(*EHPad) << "\n"); 289 } 290 291 return FI; 292 } 293 294 // Sort all registers to spill in descendent order. In the 295 // FixupSCSExtendSlotSize mode it will minimize the total frame size. 296 // In non FixupSCSExtendSlotSize mode we can skip this step. 297 void sortRegisters(SmallVectorImpl<Register> &Regs) { 298 if (!FixupSCSExtendSlotSize) 299 return; 300 llvm::sort(Regs, [&](Register &A, Register &B) { 301 return getRegisterSize(TRI, A) > getRegisterSize(TRI, B); 302 }); 303 } 304 }; 305 306 // Describes the state of the current processing statepoint instruction. 
class StatepointState {
private:
  // The statepoint instruction being processed.
  MachineInstr &MI;
  MachineFunction &MF;
  // If non-null then the statepoint is an invoke, and this points to the
  // landing pad.
  MachineBasicBlock *EHPad;
  const TargetRegisterInfo &TRI;
  const TargetInstrInfo &TII;
  MachineFrameInfo &MFI;
  // Register mask of the statepoint's calling convention; a set bit means
  // the register is preserved (callee saved) across the call.
  const uint32_t *Mask;
  // Cache of frame indexes used on previous instruction processing.
  FrameIndexesCache &CacheFI;
  bool AllowGCPtrInCSR;
  // Operand indices (into MI) holding physical registers requiring spilling.
  SmallVector<unsigned, 8> OpsToSpill;
  // Set of registers to spill (deduplicated; see findRegistersToSpill).
  SmallVector<Register, 8> RegsToSpill;
  // Set of registers to reload after the statepoint.
  SmallVector<Register, 8> RegsToReload;
  // Map Register to its assigned frame slot index.
  DenseMap<Register, int> RegToSlotIdx;

public:
  StatepointState(MachineInstr &MI, const uint32_t *Mask,
                  FrameIndexesCache &CacheFI, bool AllowGCPtrInCSR)
      : MI(MI), MF(*MI.getMF()), TRI(*MF.getSubtarget().getRegisterInfo()),
        TII(*MF.getSubtarget().getInstrInfo()), MFI(MF.getFrameInfo()),
        Mask(Mask), CacheFI(CacheFI), AllowGCPtrInCSR(AllowGCPtrInCSR) {

    // Find the statepoint's landing pad, if any.
    EHPad = nullptr;
    MachineBasicBlock *MBB = MI.getParent();
    // An invoke statepoint must be the last statepoint in its block; if
    // another STATEPOINT follows, MI cannot be the invoke, so any EH
    // successor does not belong to it and we leave EHPad null.
    bool Last = std::none_of(++MI.getIterator(), MBB->end().getInstrIterator(),
                             [](MachineInstr &I) {
                               return I.getOpcode() == TargetOpcode::STATEPOINT;
                             });

    if (!Last)
      return;

    auto IsEHPad = [](MachineBasicBlock *B) { return B->isEHPad(); };

    // An invoke can unwind to at most one landing pad.
    assert(llvm::count_if(MBB->successors(), IsEHPad) < 2 && "multiple EHPads");

    auto It = llvm::find_if(MBB->successors(), IsEHPad);
    if (It != MBB->succ_end())
      EHPad = *It;
  }

  MachineBasicBlock *getEHPad() const { return EHPad; }

  // Return true if the register is callee saved (preserved by Mask).
362 bool isCalleeSaved(Register Reg) { return (Mask[Reg / 32] >> Reg % 32) & 1; } 363 364 // Iterates over statepoint meta args to find caller saver registers. 365 // Also cache the size of found registers. 366 // Returns true if caller save registers found. 367 bool findRegistersToSpill() { 368 SmallSet<Register, 8> GCRegs; 369 // All GC pointer operands assigned to registers produce new value. 370 // Since they're tied to their defs, it is enough to collect def registers. 371 for (const auto &Def : MI.defs()) 372 GCRegs.insert(Def.getReg()); 373 374 SmallSet<Register, 8> VisitedRegs; 375 for (unsigned Idx = StatepointOpers(&MI).getVarIdx(), 376 EndIdx = MI.getNumOperands(); 377 Idx < EndIdx; ++Idx) { 378 MachineOperand &MO = MI.getOperand(Idx); 379 // Leave `undef` operands as is, StackMaps will rewrite them 380 // into a constant. 381 if (!MO.isReg() || MO.isImplicit() || MO.isUndef()) 382 continue; 383 Register Reg = MO.getReg(); 384 assert(Reg.isPhysical() && "Only physical regs are expected"); 385 386 if (isCalleeSaved(Reg) && (AllowGCPtrInCSR || !is_contained(GCRegs, Reg))) 387 continue; 388 389 LLVM_DEBUG(dbgs() << "Will spill " << printReg(Reg, &TRI) << " at index " 390 << Idx << "\n"); 391 392 if (VisitedRegs.insert(Reg).second) 393 RegsToSpill.push_back(Reg); 394 OpsToSpill.push_back(Idx); 395 } 396 CacheFI.sortRegisters(RegsToSpill); 397 return !RegsToSpill.empty(); 398 } 399 400 // Spill all caller saved registers right before statepoint instruction. 401 // Remember frame index where register is spilled. 
  // Spill every register in RegsToSpill immediately before MI, allocating
  // (or reusing) a frame slot per register and recording it in RegToSlotIdx.
  void spillRegisters() {
    for (Register Reg : RegsToSpill) {
      int FI = CacheFI.getFrameIndex(Reg, EHPad);
      const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);

      NumSpilledRegisters++;
      RegToSlotIdx[Reg] = FI;

      LLVM_DEBUG(dbgs() << "Spilling " << printReg(Reg, &TRI) << " to FI " << FI
                        << "\n");

      // Perform trivial copy propagation: may retarget the spill to the
      // copy's source register and move InsertBefore up to just after the
      // defining COPY.
      bool IsKill = true;
      MachineBasicBlock::iterator InsertBefore(MI);
      Reg = performCopyPropagation(Reg, InsertBefore, IsKill, TII, TRI);

      LLVM_DEBUG(dbgs() << "Insert spill before " << *InsertBefore);
      TII.storeRegToStackSlot(*MI.getParent(), InsertBefore, Reg, IsKill, FI,
                              RC, &TRI);
    }
  }

  // Reload Reg from its recorded frame slot, inserting the load before It.
  // Handles the MBB->end() case, where target hooks cannot insert directly.
  void insertReloadBefore(unsigned Reg, MachineBasicBlock::iterator It,
                          MachineBasicBlock *MBB) {
    const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);
    int FI = RegToSlotIdx[Reg];
    if (It != MBB->end()) {
      TII.loadRegFromStackSlot(*MBB, It, Reg, FI, RC, &TRI);
      return;
    }

    // To insert a reload at the end of MBB, insert it before the last
    // instruction and then swap them.
    assert(!MBB->empty() && "Empty block");
    --It;
    TII.loadRegFromStackSlot(*MBB, It, Reg, FI, RC, &TRI);
    // The reload was emitted immediately before It; verify it is the load
    // we expect, then move it after It.
    MachineInstr *Reload = It->getPrevNode();
    int Dummy = 0;
    (void)Dummy;
    assert(TII.isLoadFromStackSlot(*Reload, Dummy) == Reg);
    assert(Dummy == FI);
    MBB->remove(Reload);
    MBB->insertAfter(It, Reload);
  }

  // Insert reloads of (relocated) registers spilled in the statepoint.
  // Insert reloads of (relocated) registers spilled in the statepoint:
  // once right after the new statepoint, and — for invokes — once at the
  // start of the landing pad (deduplicated across statepoints via RC).
  void insertReloads(MachineInstr *NewStatepoint, RegReloadCache &RC) {
    MachineBasicBlock *MBB = NewStatepoint->getParent();
    auto InsertPoint = std::next(NewStatepoint->getIterator());

    for (auto Reg : RegsToReload) {
      insertReloadBefore(Reg, InsertPoint, MBB);
      LLVM_DEBUG(dbgs() << "Reloading " << printReg(Reg, &TRI) << " from FI "
                        << RegToSlotIdx[Reg] << " after statepoint\n");

      if (EHPad && !RC.hasReload(Reg, RegToSlotIdx[Reg], EHPad)) {
        RC.recordReload(Reg, RegToSlotIdx[Reg], EHPad);
        auto EHPadInsertPoint = EHPad->SkipPHIsLabelsAndDebug(EHPad->begin());
        insertReloadBefore(Reg, EHPadInsertPoint, EHPad);
        LLVM_DEBUG(dbgs() << "...also reload at EHPad "
                          << printMBBReference(*EHPad) << "\n");
      }
    }
  }

  // Re-write the statepoint machine instruction to replace caller saved
  // operands with indirect memory locations (frame indexes). Builds a new
  // STATEPOINT, copies/rewrites operands, re-ties defs, attaches mem
  // operands, and erases the old instruction. Returns the new statepoint.
  MachineInstr *rewriteStatepoint() {
    MachineInstr *NewMI =
        MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
    MachineInstrBuilder MIB(MF, NewMI);

    unsigned NumOps = MI.getNumOperands();

    // New operand indices (in NewMI) for the defs we keep; used below to
    // restore tied-ness. A value of NumOps marks a dropped def.
    SmallVector<unsigned, 8> NewIndices;
    unsigned NumDefs = MI.getNumDefs();
    for (unsigned I = 0; I < NumDefs; ++I) {
      MachineOperand &DefMO = MI.getOperand(I);
      assert(DefMO.isReg() && DefMO.isDef() && "Expected Reg Def operand");
      Register Reg = DefMO.getReg();
      assert(DefMO.isTied() && "Def is expected to be tied");
      // We skipped undef uses and did not spill them, so we should not
      // proceed with defs here.
      if (MI.getOperand(MI.findTiedOperandIdx(I)).isUndef()) {
        if (AllowGCPtrInCSR) {
          NewIndices.push_back(NewMI->getNumOperands());
          MIB.addReg(Reg, RegState::Define);
        }
        continue;
      }
      if (!AllowGCPtrInCSR) {
        // Every def was spilled; it must be reloaded after the call.
        assert(is_contained(RegsToSpill, Reg));
        RegsToReload.push_back(Reg);
      } else {
        if (isCalleeSaved(Reg)) {
          // Callee-saved def survives the call: keep it as a register def.
          NewIndices.push_back(NewMI->getNumOperands());
          MIB.addReg(Reg, RegState::Define);
        } else {
          NewIndices.push_back(NumOps);
          RegsToReload.push_back(Reg);
        }
      }
    }

    // Add an end marker so OpsToSpill[CurOpIdx] is always valid below.
    OpsToSpill.push_back(MI.getNumOperands());
    unsigned CurOpIdx = 0;

    for (unsigned I = NumDefs; I < MI.getNumOperands(); ++I) {
      MachineOperand &MO = MI.getOperand(I);
      if (I == OpsToSpill[CurOpIdx]) {
        // Replace the spilled register operand with an indirect memory
        // reference: {IndirectMemRefOp, size, frame index, offset 0}.
        // NOTE(review): the isReg/isPhysical asserts run after MO.getReg()
        // has already been called; consider hoisting them above the
        // RegToSlotIdx lookup.
        int FI = RegToSlotIdx[MO.getReg()];
        MIB.addImm(StackMaps::IndirectMemRefOp);
        MIB.addImm(getRegisterSize(TRI, MO.getReg()));
        assert(MO.isReg() && "Should be register");
        assert(MO.getReg().isPhysical() && "Should be physical register");
        MIB.addFrameIndex(FI);
        MIB.addImm(0);
        ++CurOpIdx;
      } else {
        MIB.add(MO);
        unsigned OldDef;
        // Restore the use->def tie against the def's new operand index.
        if (AllowGCPtrInCSR && MI.isRegTiedToDefOperand(I, &OldDef)) {
          assert(OldDef < NumDefs);
          assert(NewIndices[OldDef] < NumOps);
          MIB->tieOperands(NewIndices[OldDef], MIB->getNumOperands() - 1);
        }
      }
    }
    assert(CurOpIdx == (OpsToSpill.size() - 1) && "Not all operands processed");
    // Add mem operands: the originals, plus one per spill slot (MOLoad
    // always — the statepoint reads the slot; MOStore too if the slot is
    // also written, i.e. its register gets reloaded afterwards).
    NewMI->setMemRefs(MF, MI.memoperands());
    for (auto It : RegToSlotIdx) {
      Register R = It.first;
      int FrameIndex = It.second;
      auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);
      MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
      if (is_contained(RegsToReload, R))
        Flags |= MachineMemOperand::MOStore;
      auto *MMO =
          MF.getMachineMemOperand(PtrInfo, Flags, getRegisterSize(TRI, R),
                                  MFI.getObjectAlign(FrameIndex));
      NewMI->addMemOperand(MF, MMO);
    }

    // Insert the new statepoint and erase the old one.
    MI.getParent()->insert(MI, NewMI);

    LLVM_DEBUG(dbgs() << "rewritten statepoint to : " << *NewMI << "\n");
    MI.eraseFromParent();
    return NewMI;
  }
};

/// Drives processing of all statepoints in a function, sharing the frame
/// index cache and the per-landing-pad reload cache across them.
class StatepointProcessor {
private:
  MachineFunction &MF;
  const TargetRegisterInfo &TRI;
  FrameIndexesCache CacheFI;
  RegReloadCache ReloadCache;

public:
  StatepointProcessor(MachineFunction &MF)
      : MF(MF), TRI(*MF.getSubtarget().getRegisterInfo()),
        CacheFI(MF.getFrameInfo(), TRI) {}

  // Process a single statepoint. Returns true if the instruction was
  // rewritten (i.e. the function changed).
  bool process(MachineInstr &MI, bool AllowGCPtrInCSR) {
    StatepointOpers SO(&MI);
    uint64_t Flags = SO.getFlags();
    // Do nothing for DeoptLiveIn statepoints; they support all registers.
    if (Flags & (uint64_t)StatepointFlags::DeoptLiveIn)
      return false;
    LLVM_DEBUG(dbgs() << "\nMBB " << MI.getParent()->getNumber() << " "
                      << MI.getParent()->getName() << " : process statepoint "
                      << MI);
    CallingConv::ID CC = SO.getCallingConv();
    const uint32_t *Mask = TRI.getCallPreservedMask(MF, CC);
    StatepointState SS(MI, Mask, CacheFI, AllowGCPtrInCSR);
    // Make cached frame slots reusable, keeping any reserved for SS's EHPad.
    CacheFI.reset(SS.getEHPad());

    if (!SS.findRegistersToSpill())
      return false;

    SS.spillRegisters();
    auto *NewStatepoint = SS.rewriteStatepoint();
    SS.insertReloads(NewStatepoint, ReloadCache);
    return true;
  }
};
} // namespace

bool FixupStatepointCallerSaved::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  // Only functions using GC contain statepoints.
  const Function &F = MF.getFunction();
  if (!F.hasGC())
    return false;

  // Collect statepoints first: processing replaces instructions, which
  // would invalidate a live block iteration.
  SmallVector<MachineInstr *, 16> Statepoints;
  for (MachineBasicBlock &BB : MF)
    for (MachineInstr &I : BB)
      if (I.getOpcode() == TargetOpcode::STATEPOINT)
        Statepoints.push_back(&I);

  if (Statepoints.empty())
    return false;

  bool Changed = false;
  StatepointProcessor SPP(MF);
  unsigned NumStatepoints = 0;
  bool AllowGCPtrInCSR = PassGCPtrInCSR;
  for (MachineInstr *I : Statepoints) {
    ++NumStatepoints;
    // Debugging knob: past the limit, stop allowing GC pointers in CSRs.
    if (MaxStatepointsWithRegs.getNumOccurrences() &&
        NumStatepoints >= MaxStatepointsWithRegs)
      AllowGCPtrInCSR = false;
    Changed |= SPP.process(*I, AllowGCPtrInCSR);
  }
  return Changed;
}