//===- LiveIntervals.cpp - Live Interval Analysis -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This file implements the LiveInterval analysis pass which is used
/// by the Linear Scan Register allocator. This pass linearizes the
/// basic blocks of the function in DFS order and computes live intervals for
/// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/LiveIntervals.h"
#include "LiveRangeCalc.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

char LiveIntervals::ID = 0;
char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)

#ifndef NDEBUG
static cl::opt<bool> EnablePrecomputePhysRegs(
  "precompute-phys-liveness", cl::Hidden,
  cl::desc("Eagerly compute live intervals for all physreg units."));
#else
static bool EnablePrecomputePhysRegs = false;
#endif // NDEBUG

namespace llvm {

cl::opt<bool> UseSegmentSetForPhysRegs(
    "use-segment-set-for-physregs", cl::Hidden, cl::init(true),
    cl::desc(
        "Use segment set for the computation of the live ranges of physregs."));

} // end namespace llvm

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addRequiredTransitiveID(MachineDominatorsID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

LiveIntervals::LiveIntervals() : MachineFunctionPass(ID) {
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}

LiveIntervals::~LiveIntervals() {
  delete LRCalc;
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
    delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
  VirtRegIntervals.clear();
  RegMaskSlots.clear();
  RegMaskBits.clear();
  RegMaskBlocks.clear();

  for (LiveRange *LR : RegUnitRanges)
    delete LR;
  RegUnitRanges.clear();

  // Release VNInfo memory regions; VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
}

bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  MF = &fn;
  MRI = &MF->getRegInfo();
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();

  if (!LRCalc)
    LRCalc = new LiveRangeCalc();

  // Allocate space for all virtual registers.
  VirtRegIntervals.resize(MRI->getNumVirtRegs());

  computeVirtRegs();
  computeRegMasks();
  computeLiveInRegUnits();

  if (EnablePrecomputePhysRegs) {
    // For stress testing, precompute live ranges of all physical register
    // units, including reserved registers.
    for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
      getRegUnit(i);
  }
  LLVM_DEBUG(dump());
  return true;
}

void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";

  // Dump the regunits.
  for (unsigned Unit = 0, UnitE = RegUnitRanges.size(); Unit != UnitE; ++Unit)
    if (LiveRange *LR = RegUnitRanges[Unit])
      OS << printRegUnit(Unit, TRI) << ' ' << *LR << '\n';

  // Dump the virtregs.
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (hasInterval(Reg))
      OS << getInterval(Reg) << '\n';
  }

  OS << "RegMasks:";
  for (SlotIndex Idx : RegMaskSlots)
    OS << ' ' << Idx;
  OS << '\n';

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  MF->print(OS, Indexes);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
#endif

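/// Create a new interval for \p reg. Physical registers get an infinite spill
/// weight (huge_valf), effectively marking them unspillable; virtual registers
/// start with a weight of zero.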
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? huge_valf : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// Compute the live interval of a virtual register, based on defs and uses.
void LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
  assert(LRCalc && "LRCalc not initialized.");
  assert(LI.empty() && "Should only compute empty intervals.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  LRCalc->calculate(LI, MRI->shouldTrackSubRegLiveness(LI.reg));
  computeDeadValues(LI, nullptr);
}

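/// Compute live intervals for every virtual register that has at least one
/// non-debug use or def.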
void LiveIntervals::computeVirtRegs() {
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    createAndComputeVirtRegInterval(Reg);
  }
}

void LiveIntervals::computeRegMasks() {
  RegMaskBlocks.resize(MF->getNumBlockIDs());

  // Find all instructions with regmask operands.
  for (const MachineBasicBlock &MBB : *MF) {
    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB.getNumber()];
    RMB.first = RegMaskSlots.size();

    // Some block starts, such as EH funclets, create masks.
    if (const uint32_t *Mask = MBB.getBeginClobberMask(TRI)) {
      RegMaskSlots.push_back(Indexes->getMBBStartIdx(&MBB));
      RegMaskBits.push_back(Mask);
    }

    for (const MachineInstr &MI : MBB) {
      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isRegMask())
          continue;
        RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
        RegMaskBits.push_back(MO.getRegMask());
      }
    }

    // Some block ends, such as funclet returns, create masks. Put the mask on
    // the last instruction of the block, because MBB slot index intervals are
    // half-open.
    if (const uint32_t *Mask = MBB.getEndClobberMask(TRI)) {
      assert(!MBB.empty() && "empty return block?");
      RegMaskSlots.push_back(
          Indexes->getInstructionIndex(MBB.back()).getRegSlot());
      RegMaskBits.push_back(Mask);
    }

    // Compute the number of register mask instructions in this block.
    RMB.second = RegMaskSlots.size() - RMB.first;
  }
}

//===----------------------------------------------------------------------===//
//                           Register Unit Liveness
//===----------------------------------------------------------------------===//
//
// Fixed interference typically comes from ABI boundaries: Function arguments
// and return values are passed in fixed registers, and so are exception
// pointers entering landing pads. Certain instructions require values to be
// present in specific registers. That is also represented through fixed
// interference.
//

/// Compute the live range of a register unit, based on the uses and defs of
/// aliasing registers.  The range should be empty, or contain only dead
/// phi-defs from ABI blocks.
void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());

  // The physregs aliasing Unit are the roots and their super-registers.
  // Create all values as dead defs before extending to uses. Note that roots
  // may share super-registers. That's OK because createDeadDefs() is
  // idempotent. It is very rare for a register unit to have multiple roots, so
  // uniquing super-registers is probably not worthwhile.
  bool IsReserved = false;
  for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
    bool IsRootReserved = true;
    for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
         Super.isValid(); ++Super) {
      unsigned Reg = *Super;
      if (!MRI->reg_empty(Reg))
        LRCalc->createDeadDefs(LR, Reg);
      // A register unit is considered reserved if all its roots and all their
      // super registers are reserved.
      if (!MRI->isReserved(Reg))
        IsRootReserved = false;
    }
    IsReserved |= IsRootReserved;
  }
  assert(IsReserved == MRI->isReservedRegUnit(Unit) &&
         "reserved computation mismatch");

  // Now extend LR to reach all uses.
  // Ignore uses of reserved registers. We only track defs of those.
  if (!IsReserved) {
    for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
      for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
           Super.isValid(); ++Super) {
        unsigned Reg = *Super;
        if (!MRI->reg_empty(Reg))
          LRCalc->extendToUses(LR, Reg);
      }
    }
  }

  // Flush the segment set to the segment vector.
  if (UseSegmentSetForPhysRegs)
    LR.flushSegmentSet();
}

/// Precompute the live ranges of any register units that are live-in to an ABI
/// block somewhere. Register values can appear without a corresponding def when
/// entering the entry block or a landing pad.
void LiveIntervals::computeLiveInRegUnits() {
  RegUnitRanges.resize(TRI->getNumRegUnits());
  LLVM_DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");

  // Keep track of the live range sets allocated.
  SmallVector<unsigned, 8> NewRanges;

  // Check all basic blocks for live-ins.
  for (const MachineBasicBlock &MBB : *MF) {
    // We only care about ABI blocks: Entry + landing pads.
    if ((&MBB != &MF->front() && !MBB.isEHPad()) || MBB.livein_empty())
      continue;

    // Create phi-defs at Begin for all live-in registers.
    SlotIndex Begin = Indexes->getMBBStartIdx(&MBB);
    LLVM_DEBUG(dbgs() << Begin << "\t" << printMBBReference(MBB));
    for (const auto &LI : MBB.liveins()) {
      for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
        unsigned Unit = *Units;
        LiveRange *LR = RegUnitRanges[Unit];
        if (!LR) {
          // Use segment set to speed-up initial computation of the live range.
          LR = RegUnitRanges[Unit] = new LiveRange(UseSegmentSetForPhysRegs);
          NewRanges.push_back(Unit);
        }
        VNInfo *VNI = LR->createDeadDef(Begin, getVNInfoAllocator());
        (void)VNI;
        LLVM_DEBUG(dbgs() << ' ' << printRegUnit(Unit, TRI) << '#' << VNI->id);
      }
    }
    LLVM_DEBUG(dbgs() << '\n');
  }
  LLVM_DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");

  // Compute the 'normal' part of the ranges.
  for (unsigned Unit : NewRanges)
    computeRegUnitRange(*RegUnitRanges[Unit], Unit);
}

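/// Seed \p LR with a minimal dead segment [Def, Def.getDeadSlot()) for every
/// value number in \p VNIs that is still in use.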
static void createSegmentsForValues(LiveRange &LR,
    iterator_range<LiveInterval::vni_iterator> VNIs) {
  for (VNInfo *VNI : VNIs) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LR.addSegment(LiveRange::Segment(Def, Def.getDeadSlot(), VNI));
  }
}

using ShrinkToUsesWorkList = SmallVector<std::pair<SlotIndex, VNInfo*>, 16>;

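/// Extend the segments in \p LR so that every (use slot, value) pair on
/// \p WorkList is covered, walking backwards across basic block boundaries and
/// consulting \p OldRange for the values that were live out of predecessors.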
static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
                                 ShrinkToUsesWorkList &WorkList,
                                 const LiveRange &OldRange) {
  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;
  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<const MachineBasicBlock*, 16> LiveOut;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = Indexes.getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = LR.extendInBlock(BlockStart, Idx)) {
      assert(ExtVNI == VNI && "Unexpected existing value number");
      (void)ExtVNI;
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart ||
          !UsedPHIs.insert(VNI).second)
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (const MachineBasicBlock *Pred : MBB->predecessors()) {
        if (!LiveOut.insert(Pred).second)
          continue;
        SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = OldRange.getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    LLVM_DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    LR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (!LiveOut.insert(Pred).second)
        continue;
      SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
      assert(OldRange.getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }
}

bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  LLVM_DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");

  // Shrink subregister live ranges.
  bool NeedsCleanup = false;
  for (LiveInterval::SubRange &S : li->subranges()) {
    shrinkToUses(S, li->reg);
    if (S.empty())
      NeedsCleanup = true;
  }
  if (NeedsCleanup)
    li->removeEmptySubRanges();

  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading li->reg.
  unsigned Reg = li->reg;
  for (MachineInstr &UseMI : MRI->reg_instructions(Reg)) {
    if (UseMI.isDebugValue() || !UseMI.readsVirtualRegister(Reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveQueryResult LRQ = li->Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      LLVM_DEBUG(
          dbgs() << Idx << '\t' << UseMI
                 << "Warning: Instr claims to read non-existent value in "
                 << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create new live ranges with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(li->vni_begin(), li->vni_end()));
  extendSegmentsToUses(NewLR, *Indexes, WorkList, *li);

  // Move the trimmed segments back.
  li->segments.swap(NewLR.segments);

  // Handle dead values.
  bool CanSeparate = computeDeadValues(*li, dead);
  LLVM_DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}

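/// Mark value numbers in \p LI that have no remaining uses: dead PHI values
/// are removed, dead defs get a dead flag on their defining instruction, and
/// instructions whose defs are all dead are appended to \p dead. Returns true
/// if removing dead PHIs may have split the interval into separate connected
/// components.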
bool LiveIntervals::computeDeadValues(LiveInterval &LI,
                                      SmallVectorImpl<MachineInstr*> *dead) {
  bool MayHaveSplitComponents = false;
  for (VNInfo *VNI : LI.valnos) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LiveRange::iterator I = LI.FindSegmentContaining(Def);
    assert(I != LI.end() && "Missing segment for VNI");

    // Is the register live before? Otherwise we may have to add a read-undef
    // flag for subregister defs.
    unsigned VReg = LI.reg;
    if (MRI->shouldTrackSubRegLiveness(VReg)) {
      if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
        MachineInstr *MI = getInstructionFromIndex(Def);
        MI->setRegisterDefReadUndef(VReg);
      }
    }

    if (I->end != Def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      LI.removeSegment(I);
      LLVM_DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
      MayHaveSplitComponents = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(Def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(LI.reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        LLVM_DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }
  return MayHaveSplitComponents;
}

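/// Shrink the subregister range \p SR of virtual register \p Reg to the uses
/// that actually read one of its lanes, then drop dead PHI value numbers.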
void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
  LLVM_DEBUG(dbgs() << "Shrink: " << SR << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(Reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading Reg.
  SlotIndex LastIdx;
  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    // Skip "undef" uses.
    if (!MO.readsReg())
      continue;
    // Maybe the operand is for a subregister we don't care about.
    unsigned SubReg = MO.getSubReg();
    if (SubReg != 0) {
      LaneBitmask LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
      if ((LaneMask & SR.LaneMask).none())
        continue;
    }
    // We only need to visit each instruction once.
    MachineInstr *UseMI = MO.getParent();
    SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
    if (Idx == LastIdx)
      continue;
    LastIdx = Idx;

    LiveQueryResult LRQ = SR.Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    // For subranges it is possible that only undef values are left in that
    // part of the subregister, so there is no real live range at the use.
    if (!VNI)
      continue;

    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live range with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(SR.vni_begin(), SR.vni_end()));
  extendSegmentsToUses(NewLR, *Indexes, WorkList, SR);

  // Move the trimmed ranges back.
  SR.segments.swap(NewLR.segments);

  // Remove dead PHI value numbers
  for (VNInfo *VNI : SR.valnos) {
    if (VNI->isUnused())
      continue;
    const LiveRange::Segment *Segment = SR.getSegmentContaining(VNI->def);
    assert(Segment != nullptr && "Missing segment for VNI");
    if (Segment->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      LLVM_DEBUG(dbgs() << "Dead PHI at " << VNI->def
                        << " may separate interval\n");
      VNI->markUnused();
      SR.removeSegment(*Segment);
    }
  }

  LLVM_DEBUG(dbgs() << "Shrunk: " << SR << '\n');
}

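/// Extend \p LR so that it is live at every index in \p Indices. Entries in
/// \p Undefs mark positions where the value is explicitly undefined and limit
/// how far liveness is propagated backwards.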
void LiveIntervals::extendToIndices(LiveRange &LR,
                                    ArrayRef<SlotIndex> Indices,
                                    ArrayRef<SlotIndex> Undefs) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  for (SlotIndex Idx : Indices)
    LRCalc->extend(LR, Idx, /*PhysReg=*/0, Undefs);
}

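/// Remove liveness of the value live at \p Kill, starting at \p Kill and
/// following all paths where that value remains live. The former end points of
/// the removed liveness are appended to \p EndPoints so the caller can
/// re-extend the range later (e.g. with extendToIndices).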
void LiveIntervals::pruneValue(LiveRange &LR, SlotIndex Kill,
                               SmallVectorImpl<SlotIndex> *EndPoints) {
  LiveQueryResult LRQ = LR.Query(Kill);
  VNInfo *VNI = LRQ.valueOutOrDead();
  if (!VNI)
    return;

  MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
  SlotIndex MBBEnd = Indexes->getMBBEndIdx(KillMBB);

  // If VNI isn't live out from KillMBB, the value is trivially pruned.
  if (LRQ.endPoint() < MBBEnd) {
    LR.removeSegment(Kill, LRQ.endPoint());
    if (EndPoints) EndPoints->push_back(LRQ.endPoint());
    return;
  }

  // VNI is live out of KillMBB.
  LR.removeSegment(Kill, MBBEnd);
  if (EndPoints) EndPoints->push_back(MBBEnd);

  // Find all blocks that are reachable from KillMBB without leaving VNI's live
  // range. It is possible that KillMBB itself is reachable, so start a DFS
  // from each successor.
  using VisitedTy = df_iterator_default_set<MachineBasicBlock*,9>;
  VisitedTy Visited;
  for (MachineBasicBlock *Succ : KillMBB->successors()) {
    for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
         I = df_ext_begin(Succ, Visited), E = df_ext_end(Succ, Visited);
         I != E;) {
      MachineBasicBlock *MBB = *I;

      // Check if VNI is live in to MBB.
      SlotIndex MBBStart, MBBEnd;
      std::tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
      LiveQueryResult LRQ = LR.Query(MBBStart);
      if (LRQ.valueIn() != VNI) {
        // This block isn't part of the VNI segment. Prune the search.
        I.skipChildren();
        continue;
      }

      // Prune the search if VNI is killed in MBB.
      if (LRQ.endPoint() < MBBEnd) {
        LR.removeSegment(MBBStart, LRQ.endPoint());
        if (EndPoints) EndPoints->push_back(LRQ.endPoint());
        I.skipChildren();
        continue;
      }

      // VNI is live through MBB.
      LR.removeSegment(MBBStart, MBBEnd);
      if (EndPoints) EndPoints->push_back(MBBEnd);
      ++I;
    }
  }
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
  // Keep track of regunit ranges.
  SmallVector<std::pair<const LiveRange*, LiveRange::const_iterator>, 8> RU;
  // Keep track of subregister ranges.
  SmallVector<std::pair<const LiveInterval::SubRange*,
                        LiveRange::const_iterator>, 4> SRs;

  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    const LiveInterval &LI = getInterval(Reg);
    if (LI.empty())
      continue;

    // Find the regunit intervals for the assigned register. They may overlap
    // the virtual register live range, cancelling any kills.
    RU.clear();
    for (MCRegUnitIterator Unit(VRM->getPhys(Reg), TRI); Unit.isValid();
         ++Unit) {
      const LiveRange &RURange = getRegUnit(*Unit);
      if (RURange.empty())
        continue;
      RU.push_back(std::make_pair(&RURange, RURange.find(LI.begin()->end)));
    }

    if (MRI->subRegLivenessEnabled()) {
      SRs.clear();
      for (const LiveInterval::SubRange &SR : LI.subranges()) {
        SRs.push_back(std::make_pair(&SR, SR.find(LI.begin()->end)));
      }
    }

    // Every instruction that kills Reg corresponds to a segment range end
    // point.
    for (LiveInterval::const_iterator RI = LI.begin(), RE = LI.end(); RI != RE;
         ++RI) {
      // A block index indicates an MBB edge.
      if (RI->end.isBlock())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;

      // Check if any of the regunits are live beyond the end of RI. That could
      // happen when a physreg is defined as a copy of a virtreg:
      //
      //   %eax = COPY %5
      //   FOO %5             <--- MI, cancel kill because %eax is live.
      //   BAR killed %eax
      //
      // There should be no kill flag on FOO when %5 is rewritten as %eax.
      for (auto &RUP : RU) {
        const LiveRange &RURange = *RUP.first;
        LiveRange::const_iterator &I = RUP.second;
        if (I == RURange.end())
          continue;
        I = RURange.advanceTo(I, RI->end);
        if (I == RURange.end() || I->start >= RI->end)
          continue;
        // I is overlapping RI.
        goto CancelKill;
      }

      if (MRI->subRegLivenessEnabled()) {
        // When reading a partial undefined value we must not add a kill flag.
        // The regalloc might have used the undef lane for something else.
        // Example:
        //     %1 = ...                  ; R32: %1
        //     %2:high16 = ...           ; R64: %2
        //        = read killed %2        ; R64: %2
        //        = read %1              ; R32: %1
        // The <kill> flag is correct for %2, but the register allocator may
        // assign R0L to %1, and R0 to %2 because the low 32bits of R0
        // are actually never written by %2. After assignment the <kill>
        // flag at the read instruction is invalid.
        LaneBitmask DefinedLanesMask;
        if (!SRs.empty()) {
          // Compute a mask of lanes that are defined.
          DefinedLanesMask = LaneBitmask::getNone();
          for (auto &SRP : SRs) {
            const LiveInterval::SubRange &SR = *SRP.first;
            LiveRange::const_iterator &I = SRP.second;
            if (I == SR.end())
              continue;
            I = SR.advanceTo(I, RI->end);
            if (I == SR.end() || I->start >= RI->end)
              continue;
            // I is overlapping RI
            DefinedLanesMask |= SR.LaneMask;
          }
        } else
          DefinedLanesMask = LaneBitmask::getAll();

        bool IsFullWrite = false;
        for (const MachineOperand &MO : MI->operands()) {
          if (!MO.isReg() || MO.getReg() != Reg)
            continue;
          if (MO.isUse()) {
            // Reading any undefined lanes?
            LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
            if ((UseMask & ~DefinedLanesMask).any())
              goto CancelKill;
          } else if (MO.getSubReg() == 0) {
            // Writing to the full register?
            assert(MO.isDef());
            IsFullWrite = true;
          }
        }

        // If an instruction writes to a subregister, a new segment starts in
        // the LiveInterval. But as this only overrides part of the register,
        // adding kill flags is not correct here after registers have been
        // assigned.
        if (!IsFullWrite) {
          // Next segment has to be adjacent in the subregister write case.
          LiveRange::const_iterator N = std::next(RI);
          if (N != LI.end() && N->start == RI->end)
            goto CancelKill;
        }
      }

      MI->addRegisterKilled(Reg, nullptr);
      continue;
CancelKill:
      MI->clearRegisterKills(Reg, nullptr);
    }
  }
}

MachineBasicBlock*
LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
  // A local live range must be fully contained inside the block, meaning it is
  // defined and killed at instructions, not at block boundaries. It is not
  // live in or out of any block.
  //
  // It is technically possible to have a PHI-defined live range identical to a
  // single block, but we are going to return nullptr in that case.

  SlotIndex Start = LI.beginIndex();
  if (Start.isBlock())
    return nullptr;

  SlotIndex Stop = LI.endIndex();
  if (Stop.isBlock())
    return nullptr;

  // getMBBFromIndex doesn't need to search the MBB table when both indexes
  // belong to proper instructions.
  MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
  MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
  return MBB1 == MBB2 ? MBB1 : nullptr;
}

bool
LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
  for (const VNInfo *PHI : LI.valnos) {
    if (PHI->isUnused() || !PHI->isPHIDef())
      continue;
    const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
    // Conservatively return true instead of scanning huge predecessor lists.
    if (PHIMBB->pred_size() > 100)
      return true;
    for (const MachineBasicBlock *Pred : PHIMBB->predecessors())
      if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(Pred)))
        return true;
  }
  return false;
}

float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
                                    const MachineBlockFrequencyInfo *MBFI,
                                    const MachineInstr &MI) {
  return getSpillWeight(isDef, isUse, MBFI, MI.getParent());
}

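/// Weight a def or use by the relative frequency of its block:
/// (isDef + isUse) * BlockFreq / EntryFreq.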
float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
                                    const MachineBlockFrequencyInfo *MBFI,
                                    const MachineBasicBlock *MBB) {
  BlockFrequency Freq = MBFI->getBlockFreq(MBB);
  const float Scale = 1.0f / MBFI->getEntryFreq();
  return (isDef + isUse) * (Freq.getFrequency() * Scale);
}

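/// Create an interval for \p reg (which must not already have one) with a
/// single value defined at \p startInst and live out to the end of its basic
/// block, and return the new segment.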
LiveRange::Segment
LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr &startInst) {
  LiveInterval& Interval = createEmptyInterval(reg);
  VNInfo *VN = Interval.getNextValue(
      SlotIndex(getInstructionIndex(startInst).getRegSlot()),
      getVNInfoAllocator());
  LiveRange::Segment S(SlotIndex(getInstructionIndex(startInst).getRegSlot()),
                       getMBBEndIdx(startInst.getParent()), VN);
  Interval.addSegment(S);

  return S;
}

//===----------------------------------------------------------------------===//
//                          Register mask functions
//===----------------------------------------------------------------------===//

bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
                                             BitVector &UsableRegs) {
  if (LI.empty())
    return false;
  LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();

  // Use smaller arrays for local live ranges.
  ArrayRef<SlotIndex> Slots;
  ArrayRef<const uint32_t*> Bits;
  if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
    Slots = getRegMaskSlotsInBlock(MBB->getNumber());
    Bits = getRegMaskBitsInBlock(MBB->getNumber());
  } else {
    Slots = getRegMaskSlots();
    Bits = getRegMaskBits();
  }

  // We are going to enumerate all the register mask slots contained in LI.
  // Start with a binary search of RegMaskSlots to find a starting point.
  ArrayRef<SlotIndex>::iterator SlotI =
    std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

  // No slots in range, LI begins after the last call.
  if (SlotI == SlotE)
    return false;

  bool Found = false;
  while (true) {
    assert(*SlotI >= LiveI->start);
    // Loop over all slots overlapping this segment.
    while (*SlotI < LiveI->end) {
      // *SlotI overlaps LI. Collect mask bits.
      if (!Found) {
        // This is the first overlap. Initialize UsableRegs to all ones.
        UsableRegs.clear();
        UsableRegs.resize(TRI->getNumRegs(), true);
        Found = true;
      }
      // Remove usable registers clobbered by this mask.
      UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
      if (++SlotI == SlotE)
        return Found;
    }
    // *SlotI is beyond the current LI segment.
    LiveI = LI.advanceTo(LiveI, *SlotI);
    if (LiveI == LiveE)
      return Found;
    // Advance SlotI until it overlaps.
    while (*SlotI < LiveI->start)
      if (++SlotI == SlotE)
        return Found;
  }
}

//===----------------------------------------------------------------------===//
//                         IntervalUpdate class.
//===----------------------------------------------------------------------===//

/// Toolkit used by handleMove to trim or extend live intervals.
class LiveIntervals::HMEditor {
private:
  LiveIntervals& LIS;
  const MachineRegisterInfo& MRI;
  const TargetRegisterInfo& TRI;
  SlotIndex OldIdx;
  SlotIndex NewIdx;
  SmallPtrSet<LiveRange*, 8> Updated;
  bool UpdateFlags;

public:
  HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
           const TargetRegisterInfo& TRI,
           SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
    : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
      UpdateFlags(UpdateFlags) {}

  // FIXME: UpdateFlags is a workaround that creates live intervals for all
  // physregs, even those that aren't needed for regalloc, in order to update
  // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
  // flags, and postRA passes will use a live register utility instead.
  LiveRange *getRegUnitLI(unsigned Unit) {
    if (UpdateFlags && !MRI.isReservedRegUnit(Unit))
      return &LIS.getRegUnit(Unit);
    return LIS.getCachedRegUnit(Unit);
  }

  /// Update all live ranges touched by MI, assuming a move from OldIdx to
  /// NewIdx.
  void updateAllRanges(MachineInstr *MI) {
    LLVM_DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": "
                      << *MI);
    bool hasRegMask = false;
    for (MachineOperand &MO : MI->operands()) {
      if (MO.isRegMask())
        hasRegMask = true;
      if (!MO.isReg())
        continue;
      if (MO.isUse()) {
        if (!MO.readsReg())
          continue;
        // Aggressively clear all kill flags.
        // They are reinserted by VirtRegRewriter.
        MO.setIsKill(false);
      }

      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        LiveInterval &LI = LIS.getInterval(Reg);
        if (LI.hasSubRanges()) {
          unsigned SubReg = MO.getSubReg();
          LaneBitmask LaneMask = SubReg ? TRI.getSubRegIndexLaneMask(SubReg)
                                        : MRI.getMaxLaneMaskForVReg(Reg);
          for (LiveInterval::SubRange &S : LI.subranges()) {
            if ((S.LaneMask & LaneMask).none())
              continue;
            updateRange(S, Reg, S.LaneMask);
          }
        }
        updateRange(LI, Reg, LaneBitmask::getNone());
        continue;
      }

      // For physregs, only update the regunits that actually have a
      // precomputed live range.
      for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
        if (LiveRange *LR = getRegUnitLI(*Units))
          updateRange(*LR, *Units, LaneBitmask::getNone());
    }
    if (hasRegMask)
      updateRegMaskSlots();
  }

private:
  /// Update a single live range, assuming an instruction has been moved from
  /// OldIdx to NewIdx.
  void updateRange(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    if (!Updated.insert(&LR).second)
      return;
    LLVM_DEBUG({
      dbgs() << "     ";
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        dbgs() << printReg(Reg);
        if (LaneMask.any())
          dbgs() << " L" << PrintLaneMask(LaneMask);
      } else {
        dbgs() << printRegUnit(Reg, &TRI);
      }
      dbgs() << ":\t" << LR << '\n';
    });
    if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
      handleMoveDown(LR);
    else
      handleMoveUp(LR, Reg, LaneMask);
    LLVM_DEBUG(dbgs() << "        -->\t" << LR << '\n');
    LR.verify();
  }

  /// Update LR to reflect an instruction has been moved downwards from OldIdx
  /// to NewIdx (OldIdx < NewIdx).
  void handleMoveDown(LiveRange &LR) {
    LiveRange::iterator E = LR.end();
    // Segment going into OldIdx.
    LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());

    // No value live before or after OldIdx? Nothing to do.
    if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
      return;

    LiveRange::iterator OldIdxOut;
    // Do we have a value live-in to OldIdx?
    if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
      // If the live-in value already extends to NewIdx, there is nothing to do.
      if (SlotIndex::isEarlierEqualInstr(NewIdx, OldIdxIn->end))
        return;
      // Aggressively remove all kill flags from the old kill point.
      // Kill flags shouldn't be used while live intervals exist, they will be
      // reinserted by VirtRegRewriter.
      if (MachineInstr *KillMI = LIS.getInstructionFromIndex(OldIdxIn->end))
        for (MIBundleOperands MO(*KillMI); MO.isValid(); ++MO)
          if (MO->isReg() && MO->isUse())
            MO->setIsKill(false);

      // Is there a def before NewIdx which is not OldIdx?
      LiveRange::iterator Next = std::next(OldIdxIn);
      if (Next != E && !SlotIndex::isSameInstr(OldIdx, Next->start) &&
          SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
        // If we are here then OldIdx was just a use but not a def. We only have
        // to ensure liveness extends to NewIdx.
        LiveRange::iterator NewIdxIn =
          LR.advanceTo(Next, NewIdx.getBaseIndex());
        // Extend the segment before NewIdx if necessary.
        if (NewIdxIn == E ||
            !SlotIndex::isEarlierInstr(NewIdxIn->start, NewIdx)) {
          LiveRange::iterator Prev = std::prev(NewIdxIn);
          Prev->end = NewIdx.getRegSlot();
        }
        // Extend OldIdxIn.
        OldIdxIn->end = Next->start;
        return;
      }

      // Adjust OldIdxIn->end to reach NewIdx. This may temporarily make LR
      // invalid by overlapping ranges.
      bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
      OldIdxIn->end = NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber());
      // If this was not a kill, then there was no def and we're done.
      if (!isKill)
        return;

      // Did we have a Def at OldIdx?
      OldIdxOut = Next;
      if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
        return;
    } else {
      OldIdxOut = OldIdxIn;
    }

    // If we are here then there is a Definition at OldIdx. OldIdxOut points
    // to the segment starting there.
    assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
           "No def?");
    VNInfo *OldIdxVNI = OldIdxOut->valno;
    assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");

    // If the defined value extends beyond NewIdx, just move the beginning
    // of the segment to NewIdx.
    SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
    if (SlotIndex::isEarlierInstr(NewIdxDef, OldIdxOut->end)) {
      OldIdxVNI->def = NewIdxDef;
      OldIdxOut->start = OldIdxVNI->def;
      return;
    }

    // If we are here then we have a Definition at OldIdx which ends before
    // NewIdx.

    // Is there an existing Def at NewIdx?
    LiveRange::iterator AfterNewIdx
      = LR.advanceTo(OldIdxOut, NewIdx.getRegSlot());
    bool OldIdxDefIsDead = OldIdxOut->end.isDead();
    if (!OldIdxDefIsDead &&
        SlotIndex::isEarlierInstr(OldIdxOut->end, NewIdxDef)) {
      // OldIdx is not a dead def, and NewIdxDef is inside a new interval.
      VNInfo *DefVNI;
      if (OldIdxOut != LR.begin() &&
          !SlotIndex::isEarlierInstr(std::prev(OldIdxOut)->end,
                                     OldIdxOut->start)) {
        // There is no gap between OldIdxOut and its predecessor anymore,
        // merge them.
        LiveRange::iterator IPrev = std::prev(OldIdxOut);
        DefVNI = OldIdxVNI;
        IPrev->end = OldIdxOut->end;
      } else {
        // The value is live in to OldIdx
        LiveRange::iterator INext = std::next(OldIdxOut);
        assert(INext != E && "Must have following segment");
        // We merge OldIdxOut and its successor. As we're dealing with subreg
        // reordering, there is always a successor to OldIdxOut in the same BB.
        // We don't need INext->valno anymore and will reuse it for the new
        // segment we create later.
        DefVNI = OldIdxVNI;
        INext->start = OldIdxOut->end;
        INext->valno->def = INext->start;
      }
      // If NewIdx is behind the last segment, extend that and append a new one.
      if (AfterNewIdx == E) {
        // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
        // one position.
        //    |-  ?/OldIdxOut -| |- X0 -| ... |- Xn -| end
        // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS -| end
        std::copy(std::next(OldIdxOut), E, OldIdxOut);
        // The last segment is undefined now, reuse it for a dead def.
        LiveRange::iterator NewSegment = std::prev(E);
        *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                         DefVNI);
        DefVNI->def = NewIdxDef;

        LiveRange::iterator Prev = std::prev(NewSegment);
        Prev->end = NewIdxDef;
      } else {
        // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
        // one position.
        //    |-  ?/OldIdxOut -| |- X0 -| ... |- Xn/AfterNewIdx -| |- Next -|
        // => |- X0/OldIdxOut -| ... |- Xn -| |- Xn/AfterNewIdx -| |- Next -|
        std::copy(std::next(OldIdxOut), std::next(AfterNewIdx), OldIdxOut);
        LiveRange::iterator Prev = std::prev(AfterNewIdx);
        // We have two cases:
        if (SlotIndex::isEarlierInstr(Prev->start, NewIdxDef)) {
          // Case 1: NewIdx is inside a liverange. Split this liverange at
          // NewIdxDef into the segment "Prev" followed by "NewSegment".
          LiveRange::iterator NewSegment = AfterNewIdx;
          *NewSegment = LiveRange::Segment(NewIdxDef, Prev->end, Prev->valno);
          Prev->valno->def = NewIdxDef;

          *Prev = LiveRange::Segment(Prev->start, NewIdxDef, DefVNI);
          DefVNI->def = Prev->start;
        } else {
          // Case 2: NewIdx is in a lifetime hole. Keep AfterNewIdx as is and
          // turn Prev into a segment from NewIdx to AfterNewIdx->start.
          *Prev = LiveRange::Segment(NewIdxDef, AfterNewIdx->start, DefVNI);
          DefVNI->def = NewIdxDef;
          assert(DefVNI != AfterNewIdx->valno);
        }
      }
      return;
    }

    if (AfterNewIdx != E &&
        SlotIndex::isSameInstr(AfterNewIdx->start, NewIdxDef)) {
      // There is an existing def at NewIdx. The def at OldIdx is coalesced into
      // that value.
      assert(AfterNewIdx->valno != OldIdxVNI && "Multiple defs of value?");
      LR.removeValNo(OldIdxVNI);
    } else {
      // There was no existing def at NewIdx. We need to create a dead def
      // at NewIdx. Shift segments over the old OldIdxOut segment, this frees
      // a new segment at the place where we want to construct the dead def.
      //    |- OldIdxOut -| |- X0 -| ... |- Xn -| |- AfterNewIdx -|
      // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS. -| |- AfterNewIdx -|
      assert(AfterNewIdx != OldIdxOut && "Inconsistent iterators");
      std::copy(std::next(OldIdxOut), AfterNewIdx, OldIdxOut);
      // We can reuse OldIdxVNI now.
      LiveRange::iterator NewSegment = std::prev(AfterNewIdx);
      VNInfo *NewSegmentVNI = OldIdxVNI;
      NewSegmentVNI->def = NewIdxDef;
      *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                       NewSegmentVNI);
    }
  }

  /// Update LR to reflect an instruction has been moved upwards from OldIdx
  /// to NewIdx (NewIdx < OldIdx).
  void handleMoveUp(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    LiveRange::iterator E = LR.end();
    // Segment going into OldIdx.
    LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());

    // No value live before or after OldIdx? Nothing to do.
    if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
      return;

    LiveRange::iterator OldIdxOut;
    // Do we have a value live-in to OldIdx?
    if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
      // If the live-in value isn't killed here, then we have no Def at
      // OldIdx, moreover the value must be live at NewIdx so there is nothing
      // to do.
      bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
      if (!isKill)
        return;

      // At this point we have to move OldIdxIn->end back to the nearest
      // previous use or (dead-)def but no further than NewIdx.
      SlotIndex DefBeforeOldIdx
        = std::max(OldIdxIn->start.getDeadSlot(),
                   NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber()));
      OldIdxIn->end = findLastUseBefore(DefBeforeOldIdx, Reg, LaneMask);

      // Did we have a Def at OldIdx? If not we are done now.
      OldIdxOut = std::next(OldIdxIn);
      if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
        return;
    } else {
      OldIdxOut = OldIdxIn;
      OldIdxIn = OldIdxOut != LR.begin() ? std::prev(OldIdxOut) : E;
    }

    // If we are here then there is a Definition at OldIdx. OldIdxOut points
    // to the segment starting there.
    assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
           "No def?");
    VNInfo *OldIdxVNI = OldIdxOut->valno;
    assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
    bool OldIdxDefIsDead = OldIdxOut->end.isDead();

    // Is there an existing def at NewIdx?
    SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
    LiveRange::iterator NewIdxOut = LR.find(NewIdx.getRegSlot());
    if (SlotIndex::isSameInstr(NewIdxOut->start, NewIdx)) {
      assert(NewIdxOut->valno != OldIdxVNI &&
             "Same value defined more than once?");
      // If OldIdx was a dead def remove it.
      if (!OldIdxDefIsDead) {
        // Remove segment starting at NewIdx and move begin of OldIdxOut to
        // NewIdx so it can take its place.
        OldIdxVNI->def = NewIdxDef;
        OldIdxOut->start = NewIdxDef;
        LR.removeValNo(NewIdxOut->valno);
      } else {
        // Simply remove the dead def at OldIdx.
        LR.removeValNo(OldIdxVNI);
      }
    } else {
      // Previously nothing was live after NewIdx, so all we have to do now is
      // move the begin of OldIdxOut to NewIdx.
      if (!OldIdxDefIsDead) {
        // Do we have any intermediate Defs between OldIdx and NewIdx?
        if (OldIdxIn != E &&
            SlotIndex::isEarlierInstr(NewIdxDef, OldIdxIn->start)) {
          // OldIdx is not a dead def and NewIdx is before predecessor start.
          LiveRange::iterator NewIdxIn = NewIdxOut;
          assert(NewIdxIn == LR.find(NewIdx.getBaseIndex()));
          const SlotIndex SplitPos = NewIdxDef;
          OldIdxVNI = OldIdxIn->valno;

          // Merge the OldIdxIn and OldIdxOut segments into OldIdxOut.
          OldIdxOut->valno->def = OldIdxIn->start;
          *OldIdxOut = LiveRange::Segment(OldIdxIn->start, OldIdxOut->end,
                                          OldIdxOut->valno);
          // OldIdxIn and OldIdxVNI are now undef and can be overridden.
          // We slide [NewIdxIn, OldIdxIn) down one position.
          //    |- X0/NewIdxIn -| ... |- Xn-1 -||- Xn/OldIdxIn -||- OldIdxOut -|
          // => |- undef/NewIdxIn -| |- X0 -| ... |- Xn-1 -| |- Xn/OldIdxOut -|
          std::copy_backward(NewIdxIn, OldIdxIn, OldIdxOut);
          // NewIdxIn is now considered undef so we can reuse it for the moved
          // value.
          LiveRange::iterator NewSegment = NewIdxIn;
          LiveRange::iterator Next = std::next(NewSegment);
          if (SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
            // There is no gap between NewSegment and its predecessor.
            *NewSegment = LiveRange::Segment(Next->start, SplitPos,
                                             Next->valno);
            *Next = LiveRange::Segment(SplitPos, Next->end, OldIdxVNI);
            Next->valno->def = SplitPos;
          } else {
            // There is a gap between NewSegment and its predecessor; the value
            // becomes live-in.
1288             *NewSegment = LiveRange::Segment(SplitPos, Next->start, OldIdxVNI);
1289             NewSegment->valno->def = SplitPos;
1290           }
1291         } else {
1292           // Leave the end point of a live def.
1293           OldIdxOut->start = NewIdxDef;
1294           OldIdxVNI->def = NewIdxDef;
1295           if (OldIdxIn != E && SlotIndex::isEarlierInstr(NewIdx, OldIdxIn->end))
1296             OldIdxIn->end = NewIdx.getRegSlot();
1297         }
1298       } else if (OldIdxIn != E
1299           && SlotIndex::isEarlierInstr(NewIdxOut->start, NewIdx)
1300           && SlotIndex::isEarlierInstr(NewIdx, NewIdxOut->end)) {
1301         // OldIdxVNI is a dead def that has been moved into the middle of
1302         // another value in LR. That can happen when LR is a whole register,
1303         // but the dead def is a write to a subreg that is dead at NewIdx.
1304         // The dead def may have been moved across other values
1305         // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
1306         // down one position.
1307         //    |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next - |
1308         // => |- X0/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
1309         std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut));
1310         // Modify the segment at NewIdxOut and the following segment to meet at
1311         // the point of the dead def, with the following segment getting
1312         // OldIdxVNI as its value number.
1313         *NewIdxOut = LiveRange::Segment(
1314             NewIdxOut->start, NewIdxDef.getRegSlot(), NewIdxOut->valno);
1315         *(NewIdxOut + 1) = LiveRange::Segment(
1316             NewIdxDef.getRegSlot(), (NewIdxOut + 1)->end, OldIdxVNI);
1317         OldIdxVNI->def = NewIdxDef;
1318         // Modify subsequent segments to be defined by the moved def OldIdxVNI.
1319         for (auto Idx = NewIdxOut + 2; Idx <= OldIdxOut; ++Idx)
1320           Idx->valno = OldIdxVNI;
1321         // Aggressively remove all dead flags from the former dead definition.
1322         // Kill/dead flags shouldn't be used while live intervals exist; they
1323         // will be reinserted by VirtRegRewriter.
1324         if (MachineInstr *KillMI = LIS.getInstructionFromIndex(NewIdx))
1325           for (MIBundleOperands MO(*KillMI); MO.isValid(); ++MO)
1326             if (MO->isReg() && !MO->isUse())
1327               MO->setIsDead(false);
1328       } else {
1329         // OldIdxVNI is a dead def. It may have been moved across other values
1330         // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
1331         // down one position.
        //    |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next -|
1333         // => |- undef/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
1334         std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut));
1335         // OldIdxVNI can be reused now to build a new dead def segment.
1336         LiveRange::iterator NewSegment = NewIdxOut;
1337         VNInfo *NewSegmentVNI = OldIdxVNI;
1338         *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
1339                                          NewSegmentVNI);
1340         NewSegmentVNI->def = NewIdxDef;
1341       }
1342     }
1343   }
1344 
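  // Retarget the RegMaskSlots entry for the moved regmask instruction from
  // OldIdx to NewIdx. The asserts check that the move does not reorder the
  // regmask relative to the neighboring regmask slots (i.e. other calls).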
1345   void updateRegMaskSlots() {
1346     SmallVectorImpl<SlotIndex>::iterator RI =
1347       std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
1348                        OldIdx);
1349     assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
1350            "No RegMask at OldIdx.");
1351     *RI = NewIdx.getRegSlot();
1352     assert((RI == LIS.RegMaskSlots.begin() ||
1353             SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) &&
1354            "Cannot move regmask instruction above another call");
1355     assert((std::next(RI) == LIS.RegMaskSlots.end() ||
1356             SlotIndex::isEarlierInstr(*RI, *std::next(RI))) &&
1357            "Cannot move regmask instruction below another call");
1358   }
1359 
  // Return the last use of Reg (restricted to LaneMask) after Before and
  // before OldIdx, or Before if there is no such use.
1361   SlotIndex findLastUseBefore(SlotIndex Before, unsigned Reg,
1362                               LaneBitmask LaneMask) {
1363     if (TargetRegisterInfo::isVirtualRegister(Reg)) {
1364       SlotIndex LastUse = Before;
1365       for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
1366         if (MO.isUndef())
1367           continue;
1368         unsigned SubReg = MO.getSubReg();
        if (SubReg != 0 && LaneMask.any() &&
            (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask).none())
1371           continue;
1372 
1373         const MachineInstr &MI = *MO.getParent();
1374         SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
1375         if (InstSlot > LastUse && InstSlot < OldIdx)
1376           LastUse = InstSlot.getRegSlot();
1377       }
1378       return LastUse;
1379     }
1380 
1381     // This is a regunit interval, so scanning the use list could be very
1382     // expensive. Scan upwards from OldIdx instead.
1383     assert(Before < OldIdx && "Expected upwards move");
1384     SlotIndexes *Indexes = LIS.getSlotIndexes();
1385     MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Before);
1386 
1387     // OldIdx may not correspond to an instruction any longer, so set MII to
1388     // point to the next instruction after OldIdx, or MBB->end().
1389     MachineBasicBlock::iterator MII = MBB->end();
1390     if (MachineInstr *MI = Indexes->getInstructionFromIndex(
1391                            Indexes->getNextNonNullIndex(OldIdx)))
1392       if (MI->getParent() == MBB)
1393         MII = MI;
1394 
1395     MachineBasicBlock::iterator Begin = MBB->begin();
1396     while (MII != Begin) {
1397       if ((--MII)->isDebugInstr())
1398         continue;
1399       SlotIndex Idx = Indexes->getInstructionIndex(*MII);
1400 
1401       // Stop searching when Before is reached.
1402       if (!SlotIndex::isEarlierInstr(Before, Idx))
1403         return Before;
1404 
1405       // Check if MII uses Reg.
1406       for (MIBundleOperands MO(*MII); MO.isValid(); ++MO)
1407         if (MO->isReg() && !MO->isUndef() &&
1408             TargetRegisterInfo::isPhysicalRegister(MO->getReg()) &&
1409             TRI.hasRegUnit(MO->getReg(), Reg))
1410           return Idx.getRegSlot();
1411     }
1412     // Didn't reach Before. It must be the first instruction in the block.
1413     return Before;
1414   }
1415 };
1416 
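// MI has already been moved to its new position inside its basic block. Drop
// its old slot index, assign it a new one, and let HMEditor update every live
// range that referred to the old index.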
1417 void LiveIntervals::handleMove(MachineInstr &MI, bool UpdateFlags) {
1418   assert(!MI.isBundled() && "Can't handle bundled instructions yet.");
1419   SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
1420   Indexes->removeMachineInstrFromMaps(MI);
1421   SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
1422   assert(getMBBStartIdx(MI.getParent()) <= OldIndex &&
1423          OldIndex < getMBBEndIdx(MI.getParent()) &&
1424          "Cannot handle moves across basic block boundaries.");
1425 
1426   HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
1427   HME.updateAllRanges(&MI);
1428 }
1429 
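// MI has been moved into the bundle headed by BundleStart; update all of its
// live range points from its old slot index to the bundle's index.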
1430 void LiveIntervals::handleMoveIntoBundle(MachineInstr &MI,
1431                                          MachineInstr &BundleStart,
1432                                          bool UpdateFlags) {
1433   SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
1434   SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
1435   HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
1436   HME.updateAllRanges(&MI);
1437 }
1438 
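// Walk the instructions in [Begin, End) and patch up the segments of LR
// (restricted to LaneMask) so that they again match the defs and uses of Reg
// actually present in that range.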
1439 void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
1440                                         const MachineBasicBlock::iterator End,
1441                                         const SlotIndex endIdx,
1442                                         LiveRange &LR, const unsigned Reg,
1443                                         LaneBitmask LaneMask) {
1444   LiveInterval::iterator LII = LR.find(endIdx);
1445   SlotIndex lastUseIdx;
1446   if (LII == LR.begin()) {
1447     // This happens when the function is called for a subregister that only
1448     // occurs _after_ the range that is to be repaired.
1449     return;
1450   }
1451   if (LII != LR.end() && LII->start < endIdx)
1452     lastUseIdx = LII->end;
1453   else
1454     --LII;
1455 
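  // Visit the instructions back to front so that, when a def is reached, the
  // closest later use (lastUseIdx) is already known.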
1456   for (MachineBasicBlock::iterator I = End; I != Begin;) {
1457     --I;
1458     MachineInstr &MI = *I;
1459     if (MI.isDebugInstr())
1460       continue;
1461 
1462     SlotIndex instrIdx = getInstructionIndex(MI);
1463     bool isStartValid = getInstructionFromIndex(LII->start);
1464     bool isEndValid = getInstructionFromIndex(LII->end);
1465 
1466     // FIXME: This doesn't currently handle early-clobber or multiple removed
1467     // defs inside of the region to repair.
1468     for (MachineInstr::mop_iterator OI = MI.operands_begin(),
1469                                     OE = MI.operands_end();
1470          OI != OE; ++OI) {
1471       const MachineOperand &MO = *OI;
1472       if (!MO.isReg() || MO.getReg() != Reg)
1473         continue;
1474 
1475       unsigned SubReg = MO.getSubReg();
1476       LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg);
1477       if ((Mask & LaneMask).none())
1478         continue;
1479 
1480       if (MO.isDef()) {
1481         if (!isStartValid) {
1482           if (LII->end.isDead()) {
1483             SlotIndex prevStart;
1484             if (LII != LR.begin())
1485               prevStart = std::prev(LII)->start;
1486 
1487             // FIXME: This could be more efficient if there was a
1488             // removeSegment method that returned an iterator.
1489             LR.removeSegment(*LII, true);
1490             if (prevStart.isValid())
1491               LII = LR.find(prevStart);
1492             else
1493               LII = LR.begin();
1494           } else {
1495             LII->start = instrIdx.getRegSlot();
1496             LII->valno->def = instrIdx.getRegSlot();
1497             if (MO.getSubReg() && !MO.isUndef())
1498               lastUseIdx = instrIdx.getRegSlot();
1499             else
1500               lastUseIdx = SlotIndex();
1501             continue;
1502           }
1503         }
1504 
1505         if (!lastUseIdx.isValid()) {
1506           VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
1507           LiveRange::Segment S(instrIdx.getRegSlot(),
1508                                instrIdx.getDeadSlot(), VNI);
1509           LII = LR.addSegment(S);
1510         } else if (LII->start != instrIdx.getRegSlot()) {
1511           VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
1512           LiveRange::Segment S(instrIdx.getRegSlot(), lastUseIdx, VNI);
1513           LII = LR.addSegment(S);
1514         }
1515 
1516         if (MO.getSubReg() && !MO.isUndef())
1517           lastUseIdx = instrIdx.getRegSlot();
1518         else
1519           lastUseIdx = SlotIndex();
1520       } else if (MO.isUse()) {
1521         // FIXME: This should probably be handled outside of this branch,
1522         // either as part of the def case (for defs inside of the region) or
1523         // after the loop over the region.
1524         if (!isEndValid && !LII->end.isBlock())
1525           LII->end = instrIdx.getRegSlot();
1526         if (!lastUseIdx.isValid())
1527           lastUseIdx = instrIdx.getRegSlot();
1528       }
1529     }
1530   }
1531 }
1532 
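// Repair live intervals after instructions in [Begin, End) of MBB have been
// modified in place (for example rewritten by a late pass). OrigRegs lists the
// virtual registers whose intervals may need fixing.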
1533 void
1534 LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
1535                                       MachineBasicBlock::iterator Begin,
1536                                       MachineBasicBlock::iterator End,
1537                                       ArrayRef<unsigned> OrigRegs) {
1538   // Find anchor points, which are at the beginning/end of blocks or at
1539   // instructions that already have indexes.
1540   while (Begin != MBB->begin() && !Indexes->hasIndex(*Begin))
1541     --Begin;
1542   while (End != MBB->end() && !Indexes->hasIndex(*End))
1543     ++End;
1544 
1545   SlotIndex endIdx;
1546   if (End == MBB->end())
1547     endIdx = getMBBEndIdx(MBB).getPrevSlot();
1548   else
1549     endIdx = getInstructionIndex(*End);
1550 
1551   Indexes->repairIndexesInRange(MBB, Begin, End);
1552 
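  // Make sure every virtual register used in the repaired range has a live
  // interval; compute one for any register that is still missing it.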
1553   for (MachineBasicBlock::iterator I = End; I != Begin;) {
1554     --I;
1555     MachineInstr &MI = *I;
1556     if (MI.isDebugInstr())
1557       continue;
1558     for (MachineInstr::const_mop_iterator MOI = MI.operands_begin(),
1559                                           MOE = MI.operands_end();
1560          MOI != MOE; ++MOI) {
1561       if (MOI->isReg() &&
1562           TargetRegisterInfo::isVirtualRegister(MOI->getReg()) &&
1563           !hasInterval(MOI->getReg())) {
1564         createAndComputeVirtRegInterval(MOI->getReg());
1565       }
1566     }
1567   }
1568 
1569   for (unsigned Reg : OrigRegs) {
1570     if (!TargetRegisterInfo::isVirtualRegister(Reg))
1571       continue;
1572 
1573     LiveInterval &LI = getInterval(Reg);
1574     // FIXME: Should we support undefs that gain defs?
1575     if (!LI.hasAtLeastOneValue())
1576       continue;
1577 
1578     for (LiveInterval::SubRange &S : LI.subranges())
1579       repairOldRegInRange(Begin, End, endIdx, S, Reg, S.LaneMask);
1580 
1581     repairOldRegInRange(Begin, End, endIdx, LI, Reg);
1582   }
1583 }
1584 
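// Remove any value defined at Pos from the cached live ranges of all register
// units of the physical register Reg. Uncached regunit ranges are left alone.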
1585 void LiveIntervals::removePhysRegDefAt(unsigned Reg, SlotIndex Pos) {
1586   for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
1587     if (LiveRange *LR = getCachedRegUnit(*Unit))
1588       if (VNInfo *VNI = LR->getVNInfoAt(Pos))
1589         LR->removeValNo(VNI);
1590   }
1591 }
1592 
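// Remove the value defined at Pos from LI's main range and from each subrange,
// then drop any subranges that became empty.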
1593 void LiveIntervals::removeVRegDefAt(LiveInterval &LI, SlotIndex Pos) {
1594   // LI may not have the main range computed yet, but its subranges may
1595   // be present.
1596   VNInfo *VNI = LI.getVNInfoAt(Pos);
1597   if (VNI != nullptr) {
1598     assert(VNI->def.getBaseIndex() == Pos.getBaseIndex());
1599     LI.removeValNo(VNI);
1600   }
1601 
1602   // Also remove the value defined in subranges.
1603   for (LiveInterval::SubRange &S : LI.subranges()) {
1604     if (VNInfo *SVNI = S.getVNInfoAt(Pos))
1605       if (SVNI->def.getBaseIndex() == Pos.getBaseIndex())
1606         S.removeValNo(SVNI);
1607   }
1608   LI.removeEmptySubRanges();
1609 }
1610 
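// If LI decomposes into several connected components, keep the first component
// in LI and distribute the others into freshly created virtual registers; the
// new intervals are appended to SplitLIs.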
1611 void LiveIntervals::splitSeparateComponents(LiveInterval &LI,
1612     SmallVectorImpl<LiveInterval*> &SplitLIs) {
1613   ConnectedVNInfoEqClasses ConEQ(*this);
1614   unsigned NumComp = ConEQ.Classify(LI);
1615   if (NumComp <= 1)
1616     return;
1617   LLVM_DEBUG(dbgs() << "  Split " << NumComp << " components: " << LI << '\n');
1618   unsigned Reg = LI.reg;
1619   const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
1620   for (unsigned I = 1; I < NumComp; ++I) {
1621     unsigned NewVReg = MRI->createVirtualRegister(RegClass);
1622     LiveInterval &NewLI = createEmptyInterval(NewVReg);
1623     SplitLIs.push_back(&NewLI);
1624   }
1625   ConEQ.Distribute(LI, SplitLIs.data(), *MRI);
1626 }
1627 
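// Recompute LI's main live range as the union of its subranges using LRCalc.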
1628 void LiveIntervals::constructMainRangeFromSubranges(LiveInterval &LI) {
1629   assert(LRCalc && "LRCalc not initialized.");
1630   LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
1631   LRCalc->constructMainRangeFromSubranges(LI);
1632 }
1633