//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#include "Spiller.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpilledRanges,   "Number of spilled live ranges");
STATISTIC(NumSnippets,        "Number of spilled snippets");
STATISTIC(NumSpills,          "Number of spills inserted");
STATISTIC(NumSpillsRemoved,   "Number of spills removed");
STATISTIC(NumReloads,         "Number of reloads inserted");
STATISTIC(NumReloadsRemoved,  "Number of reloads removed");
STATISTIC(NumFolded,          "Number of folded stack accesses");
STATISTIC(NumFoldedLoads,     "Number of folded loads");
STATISTIC(NumRemats,          "Number of rematerialized defs for spilling");
STATISTIC(NumOmitReloadSpill, "Number of omitted spills of reloads");
STATISTIC(NumHoists,          "Number of hoisted spills");

static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
                                     cl::desc("Disable inline spill hoisting"));

namespace {
class InlineSpiller : public Spiller {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineFrameInfo &MFI;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

public:
  // Information about a value that was defined by a copy from a sibling
  // register.
  struct SibValueInfo {
    // True when all reaching defs were reloads: No spill is necessary.
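    // For example (illustrative pseudo machine code; %a, %b, %c and fi#0 are
    // hypothetical), a value whose reaching defs are all reloads of the same
    // stack slot is already on the stack, so no new spill is required:
    //
    //   %a = FILL fi#0      ; reload
    //   %b = FILL fi#0      ; reload
    //   %c = PHI %a, %b     ; AllDefsAreReloads remains true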
    bool AllDefsAreReloads;

    // True when value is defined by an original PHI not from splitting.
    bool DefByOrigPHI;

    // True when the COPY defining this value killed its source.
    bool KillsSource;

    // The preferred register to spill.
    unsigned SpillReg;

    // The value of SpillReg that should be spilled.
    VNInfo *SpillVNI;

    // The block where SpillVNI should be spilled. Currently, this must be the
    // block containing SpillVNI->def.
    MachineBasicBlock *SpillMBB;

    // A defining instruction that is not a sibling copy or a reload, or NULL.
    // This can be used as a template for rematerialization.
    MachineInstr *DefMI;

    // List of values that depend on this one. These values are actually the
    // same, but live range splitting has placed them in different registers,
    // or SSA update needed to insert PHI-defs to preserve SSA form. These are
    // copies of the current value and phi-kills. Usually only phi-kills cause
    // more than one dependent value.
    TinyPtrVector<VNInfo*> Deps;

    SibValueInfo(unsigned Reg, VNInfo *VNI)
      : AllDefsAreReloads(true), DefByOrigPHI(false), KillsSource(false),
        SpillReg(Reg), SpillVNI(VNI), SpillMBB(nullptr), DefMI(nullptr) {}

    // Returns true when a def has been found.
    bool hasDef() const { return DefByOrigPHI || DefMI; }
  };

private:
  // Values in RegsToSpill defined by sibling copies.
  typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;
  SibValueMap SibValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  ~InlineSpiller() override {}

public:
  InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MFI(*mf.getFrameInfo()), MRI(mf.getRegInfo()),
        TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()) {}

  void spill(LiveRangeEdit &) override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) {
    return std::find(RegsToSpill.begin(),
                     RegsToSpill.end(), Reg) != RegsToSpill.end();
  }

  bool isSibling(unsigned Reg);
  MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
  void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = nullptr);
  void analyzeSiblingValues();

  bool hoistSpill(LiveInterval &SpillLI, MachineInstr &CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> >,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};
}

namespace llvm {

Spiller::~Spiller() { }
void Spiller::anchor() { }

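// A typical register allocator constructs one spiller per function and calls
// spill() once for each live range that must go to memory. A minimal sketch
// (illustrative only; the surrounding allocator code is assumed and the
// LiveRangeEdit construction shown here is abbreviated):
//
//   std::unique_ptr<Spiller> SpillerInstance(
//       createInlineSpiller(Pass, MF, VRM));
//   LiveRangeEdit LRE(&LI, NewVRegs, MF, LIS, &VRM);
//   SpillerInstance->spill(LRE);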
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}

}

//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots, which can be important in tight loops.

/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
  if (!MI->isFullCopy())
    return 0;
  if (MI->getOperand(0).getReg() == Reg)
    return MI->getOperand(1).getReg();
  if (MI->getOperand(1).getReg() == Reg)
    return MI->getOperand(0).getReg();
  return 0;
}

/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = nullptr;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
       RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
       E = MRI.reg_instr_nodbg_end(); RI != E; ) {
    MachineInstr *MI = &*(RI++);

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && MI != UseMI)
      return false;
    UseMI = MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_instr_iterator
       RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
    MachineInstr *MI = &*(RI++);
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}


//===----------------------------------------------------------------------===//
//                            Sibling Values
//===----------------------------------------------------------------------===//

// After live range splitting, some values to be spilled may be defined by
// copies from sibling registers. We trace the sibling copies back to the
// original value if it still exists. We need it for rematerialization.
//
// Even when the value can't be rematerialized, we still want to determine if
// the value has already been spilled, or we may want to hoist the spill from a
// loop.
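//
// For example (hypothetical vregs): live range splitting may rewrite
//
//   %0 = def ... use %0
//
// into
//
//   %1 = def ... %2 = COPY %1 ... use %2
//
// making %1 and %2 siblings of the original %0. When %2 must be spilled,
// tracing "%2 = COPY %1" back to the def of %1 may expose a remat candidate,
// or show that the value is already on the stack.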

bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         VRM.getOriginal(Reg) == Original;
}

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS,
                               const InlineSpiller::SibValueInfo &SVI) {
  OS << "spill " << PrintReg(SVI.SpillReg) << ':'
     << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def;
  if (SVI.SpillMBB)
    OS << " in BB#" << SVI.SpillMBB->getNumber();
  if (SVI.AllDefsAreReloads)
    OS << " all-reloads";
  if (SVI.DefByOrigPHI)
    OS << " orig-phi";
  if (SVI.KillsSource)
    OS << " kill";
  OS << " deps[";
  for (VNInfo *Dep : SVI.Deps)
    OS << ' ' << Dep->id << '@' << Dep->def;
  OS << " ]";
  if (SVI.DefMI)
    OS << " def: " << *SVI.DefMI;
  else
    OS << '\n';
  return OS;
}
#endif

/// propagateSiblingValue - Propagate the value in SVI to dependents if it is
/// known. Otherwise remember the dependency for later.
///
/// @param SVIIter SibValues entry to propagate.
/// @param VNI Dependent value, or NULL to propagate to all saved dependents.
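///
/// For example (hypothetical values): if v2 is defined by "%2 = COPY %1" and
/// is visited before %1's defining value v1 is resolved, v2 is recorded as a
/// dependent of v1. Once v1's def is found, the work list below pushes the
/// new information through v2 and, transitively, through v2's own dependents.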
void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
                                          VNInfo *VNI) {
  SibValueMap::value_type *SVI = &*SVIIter;

  // When VNI is non-NULL, add it to SVI's deps, and only propagate to that.
  TinyPtrVector<VNInfo*> FirstDeps;
  if (VNI) {
    FirstDeps.push_back(VNI);
    SVI->second.Deps.push_back(VNI);
  }

  // Has the value been completely determined yet? If not, defer propagation.
  if (!SVI->second.hasDef())
    return;

  // Work list of values to propagate.
  SmallSetVector<SibValueMap::value_type *, 8> WorkList;
  WorkList.insert(SVI);

  do {
    SVI = WorkList.pop_back_val();
    TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;
    VNI = nullptr;

    SibValueInfo &SV = SVI->second;
    if (!SV.SpillMBB)
      SV.SpillMBB = LIS.getMBBFromIndex(SV.SpillVNI->def);

    DEBUG(dbgs() << " prop to " << Deps->size() << ": "
                 << SVI->first->id << '@' << SVI->first->def << ":\t" << SV);

    assert(SV.hasDef() && "Propagating undefined value");

    // Should this value be propagated as a preferred spill candidate? We don't
    // propagate values of registers that are about to spill.
    bool PropSpill = !DisableHoisting && !isRegToSpill(SV.SpillReg);
    unsigned SpillDepth = ~0u;

    for (VNInfo *Dep : *Deps) {
      SibValueMap::iterator DepSVI = SibValues.find(Dep);
      assert(DepSVI != SibValues.end() && "Dependent value not in SibValues");
      SibValueInfo &DepSV = DepSVI->second;
      if (!DepSV.SpillMBB)
        DepSV.SpillMBB = LIS.getMBBFromIndex(DepSV.SpillVNI->def);

      bool Changed = false;

      // Propagate defining instruction.
      if (!DepSV.hasDef()) {
        Changed = true;
        DepSV.DefMI = SV.DefMI;
        DepSV.DefByOrigPHI = SV.DefByOrigPHI;
      }

      // Propagate AllDefsAreReloads. For PHI values, this computes an AND of
      // all predecessors.
      if (!SV.AllDefsAreReloads && DepSV.AllDefsAreReloads) {
        Changed = true;
        DepSV.AllDefsAreReloads = false;
      }

      // Propagate best spill value.
      if (PropSpill && SV.SpillVNI != DepSV.SpillVNI) {
        if (SV.SpillMBB == DepSV.SpillMBB) {
          // DepSV is in the same block. Hoist when dominated.
          if (DepSV.KillsSource && SV.SpillVNI->def < DepSV.SpillVNI->def) {
            // This is an alternative def earlier in the same MBB.
            // Hoist the spill as far as possible in SpillMBB. This can ease
            // register pressure:
            //
            //   x = def
            //   y = use x
            //   s = copy x
            //
            // Hoisting the spill of s to immediately after the def removes the
            // interference between x and y:
            //
            //   x = def
            //   spill x
            //   y = use x<kill>
            //
            // This hoist only helps when the DepSV copy kills its source.
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        } else {
          // DepSV is in a different block.
          if (SpillDepth == ~0u)
            SpillDepth = Loops.getLoopDepth(SV.SpillMBB);

          // Also hoist spills to blocks with smaller loop depth, but make sure
          // that the new value dominates. Non-phi dependents are always
          // dominated, phis need checking.

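          // Concretely (illustrative numbers): with the 4/5 margin below, a
          // dependent block of frequency 100 still permits hoisting to a
          // destination block of frequency up to 125, since 100 >= 125 * 4/5.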
          const BranchProbability MarginProb(4, 5); // 80%
          // Hoist a spill to outer loop if there are multiple dependents (it
          // can be beneficial if more than one dependent is hoisted) or
          // if DepSV (the hoisting source) is hotter than SV (the hoisting
          // destination) (we add an 80% margin to bias a little towards
          // loop depth).
          bool HoistCondition =
            (MBFI.getBlockFreq(DepSV.SpillMBB) >=
             (MBFI.getBlockFreq(SV.SpillMBB) * MarginProb)) ||
            Deps->size() > 1;

          if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) &&
              HoistCondition &&
              (!DepSVI->first->isPHIDef() ||
               MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) {
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        }
      }

      if (!Changed)
        continue;

      // Something changed in DepSVI. Propagate to dependents.
      WorkList.insert(&*DepSVI);

      DEBUG(dbgs() << " update " << DepSVI->first->id << '@'
                   << DepSVI->first->def << " to:\t" << DepSV);
    }
  } while (!WorkList.empty());
}

/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Return a defining instruction that may be a candidate for rematerialization.
///
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                               VNInfo *OrigVNI) {
  // Check if a cached value already exists.
  SibValueMap::iterator SVI;
  bool Inserted;
  std::tie(SVI, Inserted) =
    SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI)));
  if (!Inserted) {
    DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':'
                 << UseVNI->id << '@' << UseVNI->def << ' ' << SVI->second);
    return SVI->second.DefMI;
  }

  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');

  // List of (Reg, VNI) that have been inserted into SibValues, but need to be
  // processed.
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  LiveInterval &OrigLI = LIS.getInterval(Original);
  do {
    unsigned Reg;
    VNInfo *VNI;
    std::tie(Reg, VNI) = WorkList.pop_back_val();
    DEBUG(dbgs() << " " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def
                 << ":\t");

    // First check if this value has already been computed.
    SVI = SibValues.find(VNI);
    assert(SVI != SibValues.end() && "Missing SibValues entry");

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      // Stop at original PHIs. We don't know the value at the
      // predecessors. Look up the VNInfo for the current definition
      // in OrigLI, to properly determine whether or not this phi was
      // added by splitting.
      if (VNI->def == OrigLI.getVNInfoAt(VNI->def)->def) {
        DEBUG(dbgs() << "orig phi value\n");
        SVI->second.DefByOrigPHI = true;
        SVI->second.AllDefsAreReloads = false;
        propagateSiblingValue(SVI);
        continue;
      }

      // This is a PHI inserted by live range splitting. We could trace the
      // live-out value from predecessor blocks, but that search can be very
      // expensive if there are many predecessors and many more PHIs as
      // generated by tail-dup when it sees an indirectbr. Instead, look at
      // all the non-PHI defs that have the same value as OrigVNI. They must
      // jointly dominate VNI->def. This is not optimal since VNI may actually
      // be jointly dominated by a smaller subset of defs, so there is a chance
      // we will miss an AllDefsAreReloads optimization.

      // Separate all values dominated by OrigVNI into PHIs and non-PHIs.
      SmallVector<VNInfo*, 8> PHIs, NonPHIs;
      LiveInterval &LI = LIS.getInterval(Reg);

      for (LiveInterval::vni_iterator VI = LI.vni_begin(), VE = LI.vni_end();
           VI != VE; ++VI) {
        VNInfo *VNI2 = *VI;
        if (VNI2->isUnused())
          continue;
        if (!OrigLI.containsOneValue() &&
            OrigLI.getVNInfoAt(VNI2->def) != OrigVNI)
          continue;
        if (VNI2->isPHIDef() && VNI2->def != OrigVNI->def)
          PHIs.push_back(VNI2);
        else
          NonPHIs.push_back(VNI2);
      }
      DEBUG(dbgs() << "split phi value, checking " << PHIs.size()
                   << " phi-defs, and " << NonPHIs.size()
                   << " non-phi/orig defs\n");

      // Create entries for all the PHIs. Don't add them to the worklist, we
      // are processing all of them in one go here.
      for (VNInfo *PHI : PHIs)
        SibValues.insert(std::make_pair(PHI, SibValueInfo(Reg, PHI)));

      // Add every PHI as a dependent of all the non-PHIs.
      for (VNInfo *NonPHI : NonPHIs) {
        // Known value? Try an insertion.
        std::tie(SVI, Inserted) =
          SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));
        // Add all the PHIs as dependents of NonPHI.
        SVI->second.Deps.insert(SVI->second.Deps.end(), PHIs.begin(),
                                PHIs.end());
        // This is the first time we see NonPHI, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(Reg, NonPHI));
        else
          // Propagate to all inserted PHIs, not just VNI.
          propagateSiblingValue(SVI);
      }

      // Next work list item.
      continue;
    }

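    // Not a PHI-def: examine the instruction that defines this value directly.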
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
        assert(SrcQ.valueIn() && "Copy from non-existing value");
        // Check if this COPY kills its source.
        SVI->second.KillsSource = SrcQ.isKill();
        VNInfo *SrcVNI = SrcQ.valueIn();
        DEBUG(dbgs() << "copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def
                     << " kill=" << unsigned(SVI->second.KillsSource) << '\n');
        // Known sibling source value? Try an insertion.
        std::tie(SVI, Inserted) = SibValues.insert(
            std::make_pair(SrcVNI, SibValueInfo(SrcReg, SrcVNI)));
        // This is the first time we see Src, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        propagateSiblingValue(SVI, VNI);
        // Next work list item.
        continue;
      }
    }

    // Track reachable reloads.
    SVI->second.DefMI = MI;
    SVI->second.SpillMBB = MI->getParent();
    int FI;
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << "reload\n");
      propagateSiblingValue(SVI);
      // Next work list item.
      continue;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << "def " << *MI);
    SVI->second.AllDefsAreReloads = false;
    propagateSiblingValue(SVI);
  } while (!WorkList.empty());

  // Look up the value we were looking for. We already did this lookup at the
  // top of the function, but SibValues may have been invalidated.
  SVI = SibValues.find(UseVNI);
  assert(SVI != SibValues.end() && "Didn't compute requested info");
  DEBUG(dbgs() << " traced to:\t" << SVI->second);
  return SVI->second.DefMI;
}

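// For example (hypothetical trace): when spilling %2 whose value is defined by
// "%2 = COPY %1", and %1 is in turn defined by "%1 = ADDri ...", the trace
// resolves to SpillReg = %1 with DefMI pointing at the ADDri, which
// reMaterializeFor can later use as a remat template.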
/// analyzeSiblingValues - Trace values defined by sibling copies back to
/// something that isn't a sibling copy.
///
/// Keep track of values that may be rematerializable.
void InlineSpiller::analyzeSiblingValues() {
  SibValues.clear();

  // No siblings at all?
  if (Edit->getReg() == Original)
    return;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  for (unsigned Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),
         VE = LI.vni_end(); VI != VE; ++VI) {
      VNInfo *VNI = *VI;
      if (VNI->isUnused())
        continue;
      MachineInstr *DefMI = nullptr;
      if (!VNI->isPHIDef()) {
        DefMI = LIS.getInstructionFromIndex(VNI->def);
        assert(DefMI && "No defining instruction");
      }
      // Check possible sibling copies.
      if (VNI->isPHIDef() || DefMI->isCopy()) {
        VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
        assert(OrigVNI && "Def outside original live range");
        if (OrigVNI->def != VNI->def)
          DefMI = traceSiblingValue(Reg, VNI, OrigVNI);
      }
      if (DefMI && Edit->checkRematerializable(VNI, DefMI, AA)) {
        DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'
                     << VNI->def << " may remat from " << *DefMI);
      }
    }
  }
}

/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert
/// a spill at a better location.
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr &CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
  SibValueMap::iterator I = SibValues.find(VNI);
  if (I == SibValues.end())
    return false;

  const SibValueInfo &SVI = I->second;

  // Let the normal folding code deal with the boring case.
  if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)
    return false;

  // SpillReg may have been deleted by remat and DCE.
  if (!LIS.hasInterval(SVI.SpillReg)) {
    DEBUG(dbgs() << "Stale interval: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  LiveInterval &SibLI = LIS.getInterval(SVI.SpillReg);
  if (!SibLI.containsValue(SVI.SpillVNI)) {
    DEBUG(dbgs() << "Stale value: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
               << *StackInt << '\n');

  // Already spilled everywhere.
  if (SVI.AllDefsAreReloads) {
    DEBUG(dbgs() << "\tno spill needed: " << SVI);
    ++NumOmitReloadSpill;
    return true;
  }
  // We are going to spill SVI.SpillVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SibLI, SVI.SpillVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);
  MachineBasicBlock::iterator MII;
  if (SVI.SpillVNI->isPHIDef())
    MII = MBB->SkipPHIsAndLabels(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SVI.SpillVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot,
                          MRI.getRegClass(SVI.SpillReg), &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(*MII);
  DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);

  ++NumSpills;
  ++NumHoists;
  return true;
}

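// For example (hypothetical): once the value in fi#0 is known to be on the
// stack at its def, a later "SPILL %3, fi#0" of the same value is redundant;
// eliminateRedundantSpills rewrites such stores to KILL instructions and lets
// dead code elimination remove them.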
/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    std::tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    DEBUG(dbgs() << "Checking redundant spills for "
                 << VNI->id << '@' << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_instr_nodbg_iterator
         UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
         UI != E; ) {
      MachineInstr *MI = &*(UI++);
      if (!MI->isCopy() && !MI->mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(*MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI->setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(MI);
        ++NumSpillsRemoved;
        --NumSpills;
      }
    }
  } while (!WorkList.empty());
}


//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    std::tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI).second)
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock *P : MBB->predecessors()) {
        VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
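    // (If this value was copied out of a snippet register, the snippet's own
    // value must also stay around, so queue it as well.)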
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {

  // Analyze instruction.
  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
  MIBundleOperands::VirtRegInfo RI =
    MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);

  if (!RI.Reads)
    return false;

  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << MI);
    return true;
  }

  if (SnippetCopies.count(&MI))
    return false;

  // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
  LiveRangeEdit::Remat RM(ParentVNI);
  SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
  if (SibI != SibValues.end())
    RM.OrigMI = SibI->second.DefMI;
  if (!Edit->canRematerializeAt(RM, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  if (RI.Tied) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
    return false;
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = Edit->createFrom(Original);

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx =
    Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
  (void)DefIdx;
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
               << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands.
  for (const auto &OpPair : Ops) {
    MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');

  ++NumRemats;
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // analyzeSiblingValues has already tested all relevant defining instructions.
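  // (anyRematerializable only checks that some candidates exist; each use is
  // re-validated against its exact location in reMaterializeFor above.)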
  if (!Edit->anyRematerializable(AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::reg_bundle_iterator
         RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
         RegI != E; ) {
      MachineInstr &MI = *RegI++;

      // Debug values are not allowed to affect codegen.
      if (MI.isDebugValue())
        continue;

      anyRemat |= reMaterializeFor(LI, MI);
    }
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

  // LiveRangeEdit::eliminateDeadDef is used to remove dead defining
  // instructions after rematerialization. To remove a VNI for a vreg from its
  // LiveInterval, LiveIntervals::removeVRegDefAt is used. However, after all
  // non-PHI VNIs are removed, PHI VNIs are still left in the LiveInterval.
  // So to get rid of unused regs, we need to check whether they have non-dbg
  // references instead of whether they have non-empty intervals.
  unsigned ResultPos = 0;
  for (unsigned Reg : RegsToSpill) {
    if (MRI.reg_nodbg_empty(Reg)) {
      Edit->eraseVirtReg(Reg);
      continue;
    }
    assert((LIS.hasInterval(Reg) && !LIS.getInterval(Reg).empty()) &&
           "Reg with empty interval has reference");
    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
}


//===----------------------------------------------------------------------===//
//                                 Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg = TII.isLoadFromStackSlot(MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(*MI);
  MI->eraseFromParent();

  if (IsLoad) {
    ++NumReloadsRemoved;
    --NumReloads;
  } else {
    ++NumSpillsRemoved;
    --NumSpills;
  }

  return true;
}

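// For example (hypothetical): once %1 is assigned to fi#0, a "%1 = FILL fi#0"
// reload is a no-op round trip through the register's own slot, and
// coalesceStackAccess above deletes such an access outright.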
#if !defined(NDEBUG)
// Dump the range of instructions from B to E with their slot indexes.
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                               MachineBasicBlock::iterator E,
                                               LiveIntervals const &LIS,
                                               const char *const header,
                                               unsigned VReg = 0) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();

    // If a register was passed in and this instruction has it as a
    // destination that is marked as an early clobber, print the
    // early-clobber slot index.
    if (VReg) {
      MachineOperand *MO = I->findRegisterDefOperand(VReg);
      if (MO && MO->isEarlyClobber())
        Idx = Idx.getRegSlot(true);
    }

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif

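// For example (illustrative pseudo machine code): folding replaces a reload
// that feeds a single instruction
//
//   %1 = FILL fi#0
//   %2 = ADD %1, %3
//
// with one memory-operand instruction
//
//   %2 = ADD [fi#0], %3
//
// saving both the reload and the register.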
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::STATEPOINT ||
                       MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                       MI->getOpcode() == TargetOpcode::STACKMAP);

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstrSpan MIS(MI);

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
             : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    MIBundleOperands::PhysRegInfo RI =
        MIBundleOperands(*FoldMI).analyzePhysReg(Reg, &TRI);
    if (RI.FullyDefined)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    LIS.removePhysRegDefAt(Reg, Idx);
  }

  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineInstr &MI : MIS)
    if (&MI != FoldMI)
      LIS.InsertMachineInstrInMaps(MI);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                           "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}

void InlineSpiller::insertReload(unsigned NewVReg,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI);
  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);

  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
                                           NewVReg));
  ++NumReloads;
}

/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI);
  TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
                          MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());

  DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
                                           "spill"));
  ++NumSpills;
}

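// For example (hypothetical rewrite): a use "%2 = ADD %1, %1" of a spilled %1
// that cannot be folded becomes
//
//   %5 = FILL fi#0          ; insertReload
//   %2 = ADD %5, %5<kill>
//
// and an unfoldable def of %1 is followed by "SPILL %5, fi#0" (insertSpill).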
/// spillAroundUses - Insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_bundle_iterator
       RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
       RegI != E; ) {
    MachineInstr *MI = &*(RegI++);

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      bool IsIndirect = MI->isIndirectDebugValue();
      uint64_t Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
      const MDNode *Var = MI->getDebugVariable();
      const MDNode *Expr = MI->getDebugExpression();
      DebugLoc DL = MI->getDebugLoc();
      DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
      MachineBasicBlock *MBB = MI->getParent();
      assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
             "Expected inlined-at fields to agree");
      BuildMI(*MBB, MBB->erase(MI), DL, TII.get(TargetOpcode::DBG_VALUE))
          .addFrameIndex(StackSlot)
          .addImm(Offset)
          .addMetadata(Var)
          .addMetadata(Expr);
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    MIBundleOperands::VirtRegInfo RI =
      MIBundleOperands(*MI).analyzeVirtReg(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (RI.Writes) {
        // Hoist the spill of a sib-reg copy.
        if (hoistSpill(OldLI, *MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes)
      if (hasLiveDef)
        insertSpill(NewVReg, true, MI);
  }
}

/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned Reg : RegsToSpill)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
                                     StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned Reg : RegsToSpill)
    spillAroundUses(Reg);

  // Hoisted spills may cause dead code.
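  // (e.g., a sibling COPY whose only remaining purpose was to feed a spill
  // that has since been hoisted to an earlier point).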
  if (!DeadDefs.empty()) {
    DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
  }

  // Finally delete the SnippetCopies.
  for (unsigned Reg : RegsToSpill) {
    for (MachineRegisterInfo::reg_instr_iterator
         RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
         RI != E; ) {
      MachineInstr &MI = *(RI++);
      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.RemoveMachineInstrFromMaps(MI);
      MI.eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (unsigned Reg : RegsToSpill)
    Edit->eraseVirtReg(Reg);
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = nullptr;

  DEBUG(dbgs() << "Inline spilling "
               << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
               << ':' << edit.getParent()
               << "\nFrom original " << PrintReg(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  analyzeSiblingValues();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, Loops, MBFI);
}