//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#include "Spiller.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpilledRanges,   "Number of spilled live ranges");
STATISTIC(NumSnippets,        "Number of spilled snippets");
STATISTIC(NumSpills,          "Number of spills inserted");
STATISTIC(NumSpillsRemoved,   "Number of spills removed");
STATISTIC(NumReloads,         "Number of reloads inserted");
STATISTIC(NumReloadsRemoved,  "Number of reloads removed");
STATISTIC(NumFolded,          "Number of folded stack accesses");
STATISTIC(NumFoldedLoads,     "Number of folded loads");
STATISTIC(NumRemats,          "Number of rematerialized defs for spilling");
STATISTIC(NumOmitReloadSpill, "Number of omitted spills of reloads");
STATISTIC(NumHoists,          "Number of hoisted spills");

static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
                                     cl::desc("Disable inline spill hoisting"));

namespace {
class InlineSpiller : public Spiller {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineFrameInfo &MFI;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

public:
  // Information about a value that was defined by a copy from a sibling
  // register.
  struct SibValueInfo {
    // True when all reaching defs were reloads: No spill is necessary.
    bool AllDefsAreReloads;

    // True when value is defined by an original PHI not from splitting.
    bool DefByOrigPHI;

    // True when the COPY defining this value killed its source.
    bool KillsSource;

    // The preferred register to spill.
    unsigned SpillReg;

    // The value of SpillReg that should be spilled.
    VNInfo *SpillVNI;

    // The block where SpillVNI should be spilled. Currently, this must be the
    // block containing SpillVNI->def.
    MachineBasicBlock *SpillMBB;

    // A defining instruction that is not a sibling copy or a reload, or NULL.
    // This can be used as a template for rematerialization.
    MachineInstr *DefMI;

    // List of values that depend on this one. These values are actually the
    // same, but live range splitting has placed them in different registers,
    // or the SSA update needed to insert PHI-defs to preserve SSA form.
    // These are copies of the current value and phi-kills. Usually only
    // phi-kills cause more than one dependent value.
    TinyPtrVector<VNInfo*> Deps;

    SibValueInfo(unsigned Reg, VNInfo *VNI)
      : AllDefsAreReloads(true), DefByOrigPHI(false), KillsSource(false),
        SpillReg(Reg), SpillVNI(VNI), SpillMBB(nullptr), DefMI(nullptr) {}

    // Returns true when a def has been found.
    bool hasDef() const { return DefByOrigPHI || DefMI; }
  };

private:
  // Values in RegsToSpill defined by sibling copies.
  typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;
  SibValueMap SibValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  ~InlineSpiller() override {}

public:
  InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MFI(*mf.getFrameInfo()), MRI(mf.getRegInfo()),
        TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()) {}

  void spill(LiveRangeEdit &) override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) {
    return std::find(RegsToSpill.begin(),
                     RegsToSpill.end(), Reg) != RegsToSpill.end();
  }

  bool isSibling(unsigned Reg);
  MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
  void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = nullptr);
  void analyzeSiblingValues();

  bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool reMaterializeFor(LiveInterval&, MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> >,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};
}

namespace llvm {

Spiller::~Spiller() { }
void Spiller::anchor() { }

Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}

}

//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is
// connected to. The snippets are small live ranges that only have a single
// real use, leftovers from live range splitting. Spilling them enables memory
// operand folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance
// for spill slots which can be important in tight loops.

/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
  if (!MI->isFullCopy())
    return 0;
  if (MI->getOperand(0).getReg() == Reg)
    return MI->getOperand(1).getReg();
  if (MI->getOperand(1).getReg() == Reg)
    return MI->getOperand(0).getReg();
  return 0;
}

/// isSnippet - Identify if a live interval is a snippet that should be
/// spilled. It is assumed that SnipLI is a virtual register with the same
/// original as Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = nullptr;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
       RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
       E = MRI.reg_instr_nodbg_end(); RI != E; ) {
    MachineInstr *MI = &*(RI++);

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && MI != UseMI)
      return false;
    UseMI = MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an
  // original register.
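  // (VRM.getOriginal() returns the register itself for a register that was
  // never split, so Original == Reg identifies that case.)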
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_instr_iterator
       RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
    MachineInstr *MI = &*(RI++);
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}


//===----------------------------------------------------------------------===//
//                            Sibling Values
//===----------------------------------------------------------------------===//

// After live range splitting, some values to be spilled may be defined by
// copies from sibling registers. We trace the sibling copies back to the
// original value if it still exists. We need it for rematerialization.
//
// Even when the value can't be rematerialized, we still want to determine if
// the value has already been spilled, or we may want to hoist the spill from
// a loop.

bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         VRM.getOriginal(Reg) == Original;
}

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS,
                               const InlineSpiller::SibValueInfo &SVI) {
  OS << "spill " << PrintReg(SVI.SpillReg) << ':'
     << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def;
  if (SVI.SpillMBB)
    OS << " in BB#" << SVI.SpillMBB->getNumber();
  if (SVI.AllDefsAreReloads)
    OS << " all-reloads";
  if (SVI.DefByOrigPHI)
    OS << " orig-phi";
  if (SVI.KillsSource)
    OS << " kill";
  OS << " deps[";
  for (VNInfo *Dep : SVI.Deps)
    OS << ' ' << Dep->id << '@' << Dep->def;
  OS << " ]";
  if (SVI.DefMI)
    OS << " def: " << *SVI.DefMI;
  else
    OS << '\n';
  return OS;
}
#endif

/// propagateSiblingValue - Propagate the value in SVI to dependents if it is
/// known. Otherwise remember the dependency for later.
///
/// @param SVIIter SibValues entry to propagate.
/// @param VNI Dependent value, or NULL to propagate to all saved dependents.
void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
                                          VNInfo *VNI) {
  SibValueMap::value_type *SVI = &*SVIIter;

  // When VNI is non-NULL, add it to SVI's deps, and only propagate to that.
  TinyPtrVector<VNInfo*> FirstDeps;
  if (VNI) {
    FirstDeps.push_back(VNI);
    SVI->second.Deps.push_back(VNI);
  }

  // Has the value been completely determined yet? If not, defer propagation.
  if (!SVI->second.hasDef())
    return;

  // Work list of values to propagate.
  SmallSetVector<SibValueMap::value_type *, 8> WorkList;
  WorkList.insert(SVI);

  do {
    SVI = WorkList.pop_back_val();
    TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;
    VNI = nullptr;

    SibValueInfo &SV = SVI->second;
    if (!SV.SpillMBB)
      SV.SpillMBB = LIS.getMBBFromIndex(SV.SpillVNI->def);

    DEBUG(dbgs() << " prop to " << Deps->size() << ": "
                 << SVI->first->id << '@' << SVI->first->def << ":\t" << SV);

    assert(SV.hasDef() && "Propagating undefined value");

    // Should this value be propagated as a preferred spill candidate? We
    // don't propagate values of registers that are about to spill.
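    // Spill hoisting can also be disabled entirely with the
    // -disable-spill-hoist flag defined above.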
    bool PropSpill = !DisableHoisting && !isRegToSpill(SV.SpillReg);
    unsigned SpillDepth = ~0u;

    for (VNInfo *Dep : *Deps) {
      SibValueMap::iterator DepSVI = SibValues.find(Dep);
      assert(DepSVI != SibValues.end() && "Dependent value not in SibValues");
      SibValueInfo &DepSV = DepSVI->second;
      if (!DepSV.SpillMBB)
        DepSV.SpillMBB = LIS.getMBBFromIndex(DepSV.SpillVNI->def);

      bool Changed = false;

      // Propagate defining instruction.
      if (!DepSV.hasDef()) {
        Changed = true;
        DepSV.DefMI = SV.DefMI;
        DepSV.DefByOrigPHI = SV.DefByOrigPHI;
      }

      // Propagate AllDefsAreReloads. For PHI values, this computes an AND of
      // all predecessors.
      if (!SV.AllDefsAreReloads && DepSV.AllDefsAreReloads) {
        Changed = true;
        DepSV.AllDefsAreReloads = false;
      }

      // Propagate best spill value.
      if (PropSpill && SV.SpillVNI != DepSV.SpillVNI) {
        if (SV.SpillMBB == DepSV.SpillMBB) {
          // DepSV is in the same block. Hoist when dominated.
          if (DepSV.KillsSource && SV.SpillVNI->def < DepSV.SpillVNI->def) {
            // This is an alternative def earlier in the same MBB.
            // Hoist the spill as far as possible in SpillMBB. This can ease
            // register pressure:
            //
            //   x = def
            //   y = use x
            //   s = copy x
            //
            // Hoisting the spill of s to immediately after the def removes
            // the interference between x and y:
            //
            //   x = def
            //   spill x
            //   y = use x<kill>
            //
            // This hoist only helps when the DepSV copy kills its source.
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        } else {
          // DepSV is in a different block.
          if (SpillDepth == ~0u)
            SpillDepth = Loops.getLoopDepth(SV.SpillMBB);

          // Also hoist spills to blocks with smaller loop depth, but make
          // sure that the new value dominates. Non-phi dependents are always
          // dominated, phis need checking.

          const BranchProbability MarginProb(4, 5); // 80%
          // Hoist a spill to the outer loop if there are multiple dependents
          // (it can be beneficial when more than one dependent is hoisted),
          // or if DepSV (the hoisting source) is hotter than SV (the hoisting
          // destination); we add an 80% margin to bias a little towards
          // loop depth.
          bool HoistCondition =
            (MBFI.getBlockFreq(DepSV.SpillMBB) >=
             (MBFI.getBlockFreq(SV.SpillMBB) * MarginProb)) ||
            Deps->size() > 1;

          if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) &&
              HoistCondition &&
              (!DepSVI->first->isPHIDef() ||
               MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) {
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        }
      }

      if (!Changed)
        continue;

      // Something changed in DepSVI. Propagate to dependents.
      WorkList.insert(&*DepSVI);

      DEBUG(dbgs() << " update " << DepSVI->first->id << '@'
                   << DepSVI->first->def << " to:\t" << DepSV);
    }
  } while (!WorkList.empty());
}

/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
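///
/// Results are memoized in SibValues, so tracing a value that has already
/// been traced is a single cache lookup.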
///
/// Return a defining instruction that may be a candidate for
/// rematerialization.
///
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                               VNInfo *OrigVNI) {
  // Check if a cached value already exists.
  SibValueMap::iterator SVI;
  bool Inserted;
  std::tie(SVI, Inserted) =
    SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI)));
  if (!Inserted) {
    DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':'
                 << UseVNI->id << '@' << UseVNI->def << ' ' << SVI->second);
    return SVI->second.DefMI;
  }

  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');

  // List of (Reg, VNI) that have been inserted into SibValues, but need to
  // be processed.
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  LiveInterval &OrigLI = LIS.getInterval(Original);
  do {
    unsigned Reg;
    VNInfo *VNI;
    std::tie(Reg, VNI) = WorkList.pop_back_val();
    DEBUG(dbgs() << " " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def
                 << ":\t");

    // First check if this value has already been computed.
    SVI = SibValues.find(VNI);
    assert(SVI != SibValues.end() && "Missing SibValues entry");

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      // Stop at original PHIs. We don't know the value at the
      // predecessors. Look up the VNInfo for the current definition
      // in OrigLI, to properly determine whether or not this phi was
      // added by splitting.
      if (VNI->def == OrigLI.getVNInfoAt(VNI->def)->def) {
        DEBUG(dbgs() << "orig phi value\n");
        SVI->second.DefByOrigPHI = true;
        SVI->second.AllDefsAreReloads = false;
        propagateSiblingValue(SVI);
        continue;
      }

      // This is a PHI inserted by live range splitting. We could trace the
      // live-out value from predecessor blocks, but that search can be very
      // expensive if there are many predecessors and many more PHIs as
      // generated by tail-dup when it sees an indirectbr. Instead, look at
      // all the non-PHI defs that have the same value as OrigVNI. They must
      // jointly dominate VNI->def. This is not optimal since VNI may
      // actually be jointly dominated by a smaller subset of defs, so there
      // is a chance we will miss an AllDefsAreReloads optimization.

      // Separate all values dominated by OrigVNI into PHIs and non-PHIs.
      SmallVector<VNInfo*, 8> PHIs, NonPHIs;
      LiveInterval &LI = LIS.getInterval(Reg);

      for (LiveInterval::vni_iterator VI = LI.vni_begin(), VE = LI.vni_end();
           VI != VE; ++VI) {
        VNInfo *VNI2 = *VI;
        if (VNI2->isUnused())
          continue;
        if (!OrigLI.containsOneValue() &&
            OrigLI.getVNInfoAt(VNI2->def) != OrigVNI)
          continue;
        if (VNI2->isPHIDef() && VNI2->def != OrigVNI->def)
          PHIs.push_back(VNI2);
        else
          NonPHIs.push_back(VNI2);
      }
      DEBUG(dbgs() << "split phi value, checking " << PHIs.size()
                   << " phi-defs, and " << NonPHIs.size()
                   << " non-phi/orig defs\n");

      // Create entries for all the PHIs. Don't add them to the worklist, we
      // are processing all of them in one go here.
      for (VNInfo *PHI : PHIs)
        SibValues.insert(std::make_pair(PHI, SibValueInfo(Reg, PHI)));

      // Add every PHI as a dependent of all the non-PHIs.
      for (VNInfo *NonPHI : NonPHIs) {
        // Known value? Try an insertion.
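        // (When NonPHI already has a SibValues entry, insert() is a no-op:
        // Inserted is false and SVI points at the existing entry.)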
        std::tie(SVI, Inserted) =
          SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));
        // Add all the PHIs as dependents of NonPHI.
        SVI->second.Deps.insert(SVI->second.Deps.end(), PHIs.begin(),
                                PHIs.end());
        // This is the first time we see NonPHI, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(Reg, NonPHI));
        else
          // Propagate to all inserted PHIs, not just VNI.
          propagateSiblingValue(SVI);
      }

      // Next work list item.
      continue;
    }

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
        assert(SrcQ.valueIn() && "Copy from non-existing value");
        // Check if this COPY kills its source.
        SVI->second.KillsSource = SrcQ.isKill();
        VNInfo *SrcVNI = SrcQ.valueIn();
        DEBUG(dbgs() << "copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def
                     << " kill=" << unsigned(SVI->second.KillsSource) << '\n');
        // Known sibling source value? Try an insertion.
        std::tie(SVI, Inserted) = SibValues.insert(
            std::make_pair(SrcVNI, SibValueInfo(SrcReg, SrcVNI)));
        // This is the first time we see Src, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        propagateSiblingValue(SVI, VNI);
        // Next work list item.
        continue;
      }
    }

    // Track reachable reloads.
    SVI->second.DefMI = MI;
    SVI->second.SpillMBB = MI->getParent();
    int FI;
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << "reload\n");
      propagateSiblingValue(SVI);
      // Next work list item.
      continue;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << "def " << *MI);
    SVI->second.AllDefsAreReloads = false;
    propagateSiblingValue(SVI);
  } while (!WorkList.empty());

  // Look up the value we were looking for. We already did this lookup at the
  // top of the function, but SibValues may have been invalidated.
  SVI = SibValues.find(UseVNI);
  assert(SVI != SibValues.end() && "Didn't compute requested info");
  DEBUG(dbgs() << " traced to:\t" << SVI->second);
  return SVI->second.DefMI;
}

/// analyzeSiblingValues - Trace values defined by sibling copies back to
/// something that isn't a sibling copy.
///
/// Keep track of values that may be rematerializable.
void InlineSpiller::analyzeSiblingValues() {
  SibValues.clear();

  // No siblings at all?
  if (Edit->getReg() == Original)
    return;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  for (unsigned Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),
         VE = LI.vni_end(); VI != VE; ++VI) {
      VNInfo *VNI = *VI;
      if (VNI->isUnused())
        continue;
      MachineInstr *DefMI = nullptr;
      if (!VNI->isPHIDef()) {
        DefMI = LIS.getInstructionFromIndex(VNI->def);
        assert(DefMI && "No defining instruction");
      }
      // Check possible sibling copies.
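      // Only a PHI-def or a COPY can carry a value defined in a sibling
      // register; any other instruction is already the real def.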
      if (VNI->isPHIDef() || DefMI->isCopy()) {
        VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
        assert(OrigVNI && "Def outside original live range");
        if (OrigVNI->def != VNI->def)
          DefMI = traceSiblingValue(Reg, VNI, OrigVNI);
      }
      if (DefMI && Edit->checkRematerializable(VNI, DefMI, AA)) {
        DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'
                     << VNI->def << " may remat from " << *DefMI);
      }
    }
  }
}

/// hoistSpill - Given a sibling copy that defines a value to be spilled,
/// insert a spill at a better location.
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
  SibValueMap::iterator I = SibValues.find(VNI);
  if (I == SibValues.end())
    return false;

  const SibValueInfo &SVI = I->second;

  // Let the normal folding code deal with the boring case.
  if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)
    return false;

  // SpillReg may have been deleted by remat and DCE.
  if (!LIS.hasInterval(SVI.SpillReg)) {
    DEBUG(dbgs() << "Stale interval: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  LiveInterval &SibLI = LIS.getInterval(SVI.SpillReg);
  if (!SibLI.containsValue(SVI.SpillVNI)) {
    DEBUG(dbgs() << "Stale value: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being
  // more careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
               << *StackInt << '\n');

  // Already spilled everywhere.
  if (SVI.AllDefsAreReloads) {
    DEBUG(dbgs() << "\tno spill needed: " << SVI);
    ++NumOmitReloadSpill;
    return true;
  }
  // We are going to spill SVI.SpillVNI immediately after its def, so clear
  // out any later spills of the same value.
  eliminateRedundantSpills(SibLI, SVI.SpillVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);
  MachineBasicBlock::iterator MII;
  if (SVI.SpillVNI->isPHIDef())
    MII = MBB->SkipPHIsAndLabels(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SVI.SpillVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot,
                          MRI.getRegClass(SVI.SpillReg), &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(MII);
  DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);

  ++NumSpills;
  ++NumHoists;
  return true;
}

/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
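/// This relies on all sibling registers sharing the original register's
/// stack slot, so every spill of the value stores to the same slot.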
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    std::tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    DEBUG(dbgs() << "Checking redundant spills for "
                 << VNI->id << '@' << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_instr_nodbg_iterator
         UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
         UI != E; ) {
      MachineInstr *MI = &*(UI++);
      if (!MI->isCopy() && !MI->mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI->setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(MI);
        ++NumSpillsRemoved;
        --NumSpills;
      }
    }
  } while (!WorkList.empty());
}


//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    std::tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI).second)
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock *P : MBB->predecessors()) {
        VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
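    // A value defined by a snippet copy is really the copied snippet value,
    // so the source value must be kept alive as well.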
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
                                     MachineBasicBlock::iterator MI) {

  // Analyze instruction.
  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
  MIBundleOperands::VirtRegInfo RI =
    MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);

  if (!RI.Reads)
    return false;

  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  if (SnippetCopies.count(MI))
    return false;

  // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
  LiveRangeEdit::Remat RM(ParentVNI);
  SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
  if (SibI != SibValues.end())
    RM.OrigMI = SibI->second.DefMI;
  if (!Edit->canRematerializeAt(RM, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require
  // the same register for uses and defs.
  if (RI.Tied) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
    return false;
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = Edit->createFrom(Original);

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewVReg, RM,
                                           TRI);
  (void)DefIdx;
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
               << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands.
  for (const auto &OpPair : Ops) {
    MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI << '\n');

  ++NumRemats;
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible, and
/// trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // analyzeSiblingValues has already tested all relevant defining
  // instructions.
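  // anyRematerializable() should be able to answer from those cached results.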
  if (!Edit->anyRematerializable(AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::reg_bundle_iterator
         RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
         RegI != E; ) {
      MachineInstr *MI = &*(RegI++);

      // Debug values are not allowed to affect codegen.
      if (MI->isDebugValue())
        continue;

      anyRemat |= reMaterializeFor(LI, MI);
    }
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

  // LiveRangeEdit::eliminateDeadDef is used to remove dead defining
  // instructions after rematerialization. To remove a VNI for a vreg from
  // its LiveInterval, LiveIntervals::removeVRegDefAt is used. However, after
  // all non-PHI VNIs are removed, PHI VNIs are still left in the
  // LiveInterval. So to get rid of an unused reg, we need to check whether
  // it has a non-dbg reference instead of whether it has a non-empty
  // interval.
  unsigned ResultPos = 0;
  for (unsigned Reg : RegsToSpill) {
    if (MRI.reg_nodbg_empty(Reg)) {
      Edit->eraseVirtReg(Reg);
      continue;
    }
    assert((LIS.hasInterval(Reg) && !LIS.getInterval(Reg).empty()) &&
           "Reg with empty interval has reference");
    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
}


//===----------------------------------------------------------------------===//
//                                 Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg = TII.isLoadFromStackSlot(MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();

  if (IsLoad) {
    ++NumReloadsRemoved;
    --NumReloads;
  } else {
    ++NumSpillsRemoved;
    --NumSpills;
  }

  return true;
}

#if !defined(NDEBUG)
// Dump the range of instructions from B to E with their slot indexes.
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                               MachineBasicBlock::iterator E,
                                               LiveIntervals const &LIS,
                                               const char *const header,
                                               unsigned VReg = 0) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(I).getRegSlot();

    // If a register was passed in and this instruction has it as a
    // destination that is marked as an early clobber, print the
    // early-clobber slot index.
    if (VReg) {
      MachineOperand *MO = I->findRegisterDefOperand(VReg);
      if (MO && MO->isEarlyClobber())
        Idx = Idx.getRegSlot(true);
    }

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif

/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::STATEPOINT ||
                       MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                       MI->getOpcode() == TargetOpcode::STACKMAP);

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstrSpan MIS(MI);

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
             : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    MIBundleOperands::PhysRegInfo RI =
        MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
    if (RI.FullyDefined)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
    LIS.removePhysRegDefAt(Reg, Idx);
  }

  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineInstr &MI : MIS)
    if (&MI != FoldMI)
      LIS.InsertMachineInstrInMaps(&MI);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                           "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}

void InlineSpiller::insertReload(unsigned NewVReg,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI);
  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);

  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
                                           NewVReg));
  ++NumReloads;
}

/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI);
  TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
                          MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());

  DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
                                           "spill"));
  ++NumSpills;
}

/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_bundle_iterator
       RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
       RegI != E; ) {
    MachineInstr *MI = &*(RegI++);

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      bool IsIndirect = MI->isIndirectDebugValue();
      uint64_t Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
      const MDNode *Var = MI->getDebugVariable();
      const MDNode *Expr = MI->getDebugExpression();
      DebugLoc DL = MI->getDebugLoc();
      DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
      MachineBasicBlock *MBB = MI->getParent();
      assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
             "Expected inlined-at fields to agree");
      BuildMI(*MBB, MBB->erase(MI), DL, TII.get(TargetOpcode::DBG_VALUE))
          .addFrameIndex(StackSlot)
          .addImm(Offset)
          .addMetadata(Var)
          .addMetadata(Expr);
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
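    // (spillAll() erases all SnippetCopies once every use has been
    // rewritten.)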
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    MIBundleOperands::VirtRegInfo RI =
      MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (RI.Writes) {
        // Hoist the spill of a sib-reg copy.
        if (hoistSpill(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes)
      if (hasLiveDef)
        insertSpill(NewVReg, true, MI);
  }
}

/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned Reg : RegsToSpill)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
                                     StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned Reg : RegsToSpill)
    spillAroundUses(Reg);

  // Hoisted spills may cause dead code.
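  // (spillAroundUses() marks a sib-reg COPY dead when its spill has been
  // hoisted.)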
  if (!DeadDefs.empty()) {
    DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
  }

  // Finally delete the SnippetCopies.
  for (unsigned Reg : RegsToSpill) {
    for (MachineRegisterInfo::reg_instr_iterator
         RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
         RI != E; ) {
      MachineInstr *MI = &*(RI++);
      assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (unsigned Reg : RegsToSpill)
    Edit->eraseVirtReg(Reg);
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = nullptr;

  DEBUG(dbgs() << "Inline spilling "
               << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
               << ':' << edit.getParent()
               << "\nFrom original " << PrintReg(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  analyzeSiblingValues();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, Loops, MBFI);
}