1 //===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // MachineScheduler schedules machine instructions after phi elimination. It
11 // preserves LiveIntervals so it can be invoked before register allocation.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/CodeGen/MachineScheduler.h"
16 #include "llvm/ADT/PriorityQueue.h"
17 #include "llvm/Analysis/AliasAnalysis.h"
18 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
19 #include "llvm/CodeGen/MachineDominators.h"
20 #include "llvm/CodeGen/MachineLoopInfo.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/Passes.h"
23 #include "llvm/CodeGen/RegisterClassInfo.h"
24 #include "llvm/CodeGen/ScheduleDFS.h"
25 #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
26 #include "llvm/Support/CommandLine.h"
27 #include "llvm/Support/Debug.h"
28 #include "llvm/Support/ErrorHandling.h"
29 #include "llvm/Support/GraphWriter.h"
30 #include "llvm/Support/raw_ostream.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 
33 using namespace llvm;
34 
35 #define DEBUG_TYPE "misched"
36 
37 namespace llvm {
38 cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
39                            cl::desc("Force top-down list scheduling"));
40 cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
41                             cl::desc("Force bottom-up list scheduling"));
42 cl::opt<bool>
43 DumpCriticalPathLength("misched-dcpl", cl::Hidden,
44                        cl::desc("Print critical path length to stdout"));
45 }
46 
47 #ifndef NDEBUG
48 static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
49   cl::desc("Pop up a window to show MISched dags after they are processed"));
50 
51 /// In some situations a few uninteresting nodes depend on nearly all other
52 /// nodes in the graph, provide a cutoff to hide them.
53 static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
54   cl::desc("Hide nodes with more predecessor/successor than cutoff"));
55 
56 static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
57   cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
58 
59 static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
60   cl::desc("Only schedule this function"));
61 static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
62   cl::desc("Only schedule this MBB#"));
63 #else
64 static bool ViewMISchedDAGs = false;
65 #endif // NDEBUG
66 
67 static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
68   cl::desc("Enable register pressure scheduling."), cl::init(true));
69 
70 static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
71   cl::desc("Enable cyclic critical path analysis."), cl::init(true));
72 
73 static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
74                                         cl::desc("Enable memop clustering."),
75                                         cl::init(true));
76 
77 // Experimental heuristics
78 static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
79   cl::desc("Enable scheduling for macro fusion."), cl::init(true));
80 
81 static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
82   cl::desc("Verify machine instrs before and after machine scheduling"));
83 
84 // DAG subtrees must have at least this many nodes.
85 static const unsigned MinSubtreeSize = 8;
86 
87 // Pin the vtables to this file.
88 void MachineSchedStrategy::anchor() {}
89 void ScheduleDAGMutation::anchor() {}
90 
91 //===----------------------------------------------------------------------===//
92 // Machine Instruction Scheduling Pass and Registry
93 //===----------------------------------------------------------------------===//
94 
95 MachineSchedContext::MachineSchedContext():
96     MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr), LIS(nullptr) {
97   RegClassInfo = new RegisterClassInfo();
98 }
99 
100 MachineSchedContext::~MachineSchedContext() {
101   delete RegClassInfo;
102 }
103 
104 namespace {
105 /// Base class for a machine scheduler class that can run at any point.
106 class MachineSchedulerBase : public MachineSchedContext,
107                              public MachineFunctionPass {
108 public:
109   MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}
110 
111   void print(raw_ostream &O, const Module* = nullptr) const override;
112 
113 protected:
114   void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
115 };
116 
117 /// MachineScheduler runs after coalescing and before register allocation.
118 class MachineScheduler : public MachineSchedulerBase {
119 public:
120   MachineScheduler();
121 
122   void getAnalysisUsage(AnalysisUsage &AU) const override;
123 
124   bool runOnMachineFunction(MachineFunction&) override;
125 
126   static char ID; // Class identification, replacement for typeinfo
127 
128 protected:
129   ScheduleDAGInstrs *createMachineScheduler();
130 };
131 
132 /// PostMachineScheduler runs after shortly before code emission.
133 class PostMachineScheduler : public MachineSchedulerBase {
134 public:
135   PostMachineScheduler();
136 
137   void getAnalysisUsage(AnalysisUsage &AU) const override;
138 
139   bool runOnMachineFunction(MachineFunction&) override;
140 
141   static char ID; // Class identification, replacement for typeinfo
142 
143 protected:
144   ScheduleDAGInstrs *createPostMachineScheduler();
145 };
146 } // namespace
147 
148 char MachineScheduler::ID = 0;
149 
150 char &llvm::MachineSchedulerID = MachineScheduler::ID;
151 
152 INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
153                       "Machine Instruction Scheduler", false, false)
154 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
155 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
156 INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
157 INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
158                     "Machine Instruction Scheduler", false, false)
159 
160 MachineScheduler::MachineScheduler()
161 : MachineSchedulerBase(ID) {
162   initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
163 }
164 
165 void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
166   AU.setPreservesCFG();
167   AU.addRequiredID(MachineDominatorsID);
168   AU.addRequired<MachineLoopInfo>();
169   AU.addRequired<AAResultsWrapperPass>();
170   AU.addRequired<TargetPassConfig>();
171   AU.addRequired<SlotIndexes>();
172   AU.addPreserved<SlotIndexes>();
173   AU.addRequired<LiveIntervals>();
174   AU.addPreserved<LiveIntervals>();
175   MachineFunctionPass::getAnalysisUsage(AU);
176 }
177 
178 char PostMachineScheduler::ID = 0;
179 
180 char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;
181 
182 INITIALIZE_PASS(PostMachineScheduler, "postmisched",
183                 "PostRA Machine Instruction Scheduler", false, false)
184 
185 PostMachineScheduler::PostMachineScheduler()
186 : MachineSchedulerBase(ID) {
187   initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
188 }
189 
190 void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
191   AU.setPreservesCFG();
192   AU.addRequiredID(MachineDominatorsID);
193   AU.addRequired<MachineLoopInfo>();
194   AU.addRequired<TargetPassConfig>();
195   MachineFunctionPass::getAnalysisUsage(AU);
196 }
197 
198 MachinePassRegistry MachineSchedRegistry::Registry;
199 
200 /// A dummy default scheduler factory indicates whether the scheduler
201 /// is overridden on the command line.
202 static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
203   return nullptr;
204 }
205 
206 /// MachineSchedOpt allows command line selection of the scheduler.
207 static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
208                RegisterPassParser<MachineSchedRegistry> >
209 MachineSchedOpt("misched",
210                 cl::init(&useDefaultMachineSched), cl::Hidden,
211                 cl::desc("Machine instruction scheduler to use"));
212 
213 static MachineSchedRegistry
214 DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
215                      useDefaultMachineSched);
216 
217 static cl::opt<bool> EnableMachineSched(
218     "enable-misched",
219     cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
220     cl::Hidden);
221 
222 static cl::opt<bool> EnablePostRAMachineSched(
223     "enable-post-misched",
224     cl::desc("Enable the post-ra machine instruction scheduling pass."),
225     cl::init(true), cl::Hidden);
226 
227 /// Forward declare the standard machine scheduler. This will be used as the
228 /// default scheduler if the target does not set a default.
229 static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
230 static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);
231 
232 /// Decrement this iterator until reaching the top or a non-debug instr.
233 static MachineBasicBlock::const_iterator
234 priorNonDebug(MachineBasicBlock::const_iterator I,
235               MachineBasicBlock::const_iterator Beg) {
236   assert(I != Beg && "reached the top of the region, cannot decrement");
237   while (--I != Beg) {
238     if (!I->isDebugValue())
239       break;
240   }
241   return I;
242 }
243 
244 /// Non-const version.
245 static MachineBasicBlock::iterator
246 priorNonDebug(MachineBasicBlock::iterator I,
247               MachineBasicBlock::const_iterator Beg) {
248   return const_cast<MachineInstr*>(
249     &*priorNonDebug(MachineBasicBlock::const_iterator(I), Beg));
250 }
251 
252 /// If this iterator is a debug value, increment until reaching the End or a
253 /// non-debug instruction.
254 static MachineBasicBlock::const_iterator
255 nextIfDebug(MachineBasicBlock::const_iterator I,
256             MachineBasicBlock::const_iterator End) {
257   for(; I != End; ++I) {
258     if (!I->isDebugValue())
259       break;
260   }
261   return I;
262 }
263 
264 /// Non-const version.
265 static MachineBasicBlock::iterator
266 nextIfDebug(MachineBasicBlock::iterator I,
267             MachineBasicBlock::const_iterator End) {
268   // Cast the return value to nonconst MachineInstr, then cast to an
269   // instr_iterator, which does not check for null, finally return a
270   // bundle_iterator.
271   return MachineBasicBlock::instr_iterator(
272     const_cast<MachineInstr*>(
273       &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
274 }
275 
276 /// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
277 ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
278   // Select the scheduler, or set the default.
279   MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
280   if (Ctor != useDefaultMachineSched)
281     return Ctor(this);
282 
283   // Get the default scheduler set by the target for this function.
284   ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
285   if (Scheduler)
286     return Scheduler;
287 
288   // Default to GenericScheduler.
289   return createGenericSchedLive(this);
290 }
291 
292 /// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
293 /// the caller. We don't have a command line option to override the postRA
294 /// scheduler. The Target must configure it.
295 ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
296   // Get the postRA scheduler set by the target for this function.
297   ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
298   if (Scheduler)
299     return Scheduler;
300 
301   // Default to GenericScheduler.
302   return createGenericSchedPostRA(this);
303 }
304 
305 /// Top-level MachineScheduler pass driver.
306 ///
307 /// Visit blocks in function order. Divide each block into scheduling regions
308 /// and visit them bottom-up. Visiting regions bottom-up is not required, but is
309 /// consistent with the DAG builder, which traverses the interior of the
310 /// scheduling regions bottom-up.
311 ///
312 /// This design avoids exposing scheduling boundaries to the DAG builder,
313 /// simplifying the DAG builder's support for "special" target instructions.
314 /// At the same time the design allows target schedulers to operate across
315 /// scheduling boundaries, for example to bundle the boudary instructions
316 /// without reordering them. This creates complexity, because the target
317 /// scheduler must update the RegionBegin and RegionEnd positions cached by
318 /// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
319 /// design would be to split blocks at scheduling boundaries, but LLVM has a
320 /// general bias against block splitting purely for implementation simplicity.
321 bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
322   if (skipOptnoneFunction(*mf.getFunction()))
323     return false;
324 
325   if (EnableMachineSched.getNumOccurrences()) {
326     if (!EnableMachineSched)
327       return false;
328   } else if (!mf.getSubtarget().enableMachineScheduler())
329     return false;
330 
331   DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));
332 
333   // Initialize the context of the pass.
334   MF = &mf;
335   MLI = &getAnalysis<MachineLoopInfo>();
336   MDT = &getAnalysis<MachineDominatorTree>();
337   PassConfig = &getAnalysis<TargetPassConfig>();
338   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
339 
340   LIS = &getAnalysis<LiveIntervals>();
341 
342   if (VerifyScheduling) {
343     DEBUG(LIS->dump());
344     MF->verify(this, "Before machine scheduling.");
345   }
346   RegClassInfo->runOnMachineFunction(*MF);
347 
348   // Instantiate the selected scheduler for this target, function, and
349   // optimization level.
350   std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
351   scheduleRegions(*Scheduler, false);
352 
353   DEBUG(LIS->dump());
354   if (VerifyScheduling)
355     MF->verify(this, "After machine scheduling.");
356   return true;
357 }
358 
359 bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
360   if (skipOptnoneFunction(*mf.getFunction()))
361     return false;
362 
363   if (EnablePostRAMachineSched.getNumOccurrences()) {
364     if (!EnablePostRAMachineSched)
365       return false;
366   } else if (!mf.getSubtarget().enablePostRAScheduler()) {
367     DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
368     return false;
369   }
370   DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));
371 
372   // Initialize the context of the pass.
373   MF = &mf;
374   PassConfig = &getAnalysis<TargetPassConfig>();
375 
376   if (VerifyScheduling)
377     MF->verify(this, "Before post machine scheduling.");
378 
379   // Instantiate the selected scheduler for this target, function, and
380   // optimization level.
381   std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
382   scheduleRegions(*Scheduler, true);
383 
384   if (VerifyScheduling)
385     MF->verify(this, "After post machine scheduling.");
386   return true;
387 }
388 
389 /// Return true of the given instruction should not be included in a scheduling
390 /// region.
391 ///
392 /// MachineScheduler does not currently support scheduling across calls. To
393 /// handle calls, the DAG builder needs to be modified to create register
394 /// anti/output dependencies on the registers clobbered by the call's regmask
395 /// operand. In PreRA scheduling, the stack pointer adjustment already prevents
396 /// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
397 /// the boundary, but there would be no benefit to postRA scheduling across
398 /// calls this late anyway.
399 static bool isSchedBoundary(MachineBasicBlock::iterator MI,
400                             MachineBasicBlock *MBB,
401                             MachineFunction *MF,
402                             const TargetInstrInfo *TII) {
403   return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
404 }
405 
406 /// Main driver for both MachineScheduler and PostMachineScheduler.
407 void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
408                                            bool FixKillFlags) {
409   const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
410 
411   // Visit all machine basic blocks.
412   //
413   // TODO: Visit blocks in global postorder or postorder within the bottom-up
414   // loop tree. Then we can optionally compute global RegPressure.
415   for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
416        MBB != MBBEnd; ++MBB) {
417 
418     Scheduler.startBlock(&*MBB);
419 
420 #ifndef NDEBUG
421     if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
422       continue;
423     if (SchedOnlyBlock.getNumOccurrences()
424         && (int)SchedOnlyBlock != MBB->getNumber())
425       continue;
426 #endif
427 
428     // Break the block into scheduling regions [I, RegionEnd), and schedule each
429     // region as soon as it is discovered. RegionEnd points the scheduling
430     // boundary at the bottom of the region. The DAG does not include RegionEnd,
431     // but the region does (i.e. the next RegionEnd is above the previous
432     // RegionBegin). If the current block has no terminator then RegionEnd ==
433     // MBB->end() for the bottom region.
434     //
435     // The Scheduler may insert instructions during either schedule() or
436     // exitRegion(), even for empty regions. So the local iterators 'I' and
437     // 'RegionEnd' are invalid across these calls.
438     //
439     // MBB::size() uses instr_iterator to count. Here we need a bundle to count
440     // as a single instruction.
441     unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
442     for(MachineBasicBlock::iterator RegionEnd = MBB->end();
443         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {
444 
445       // Avoid decrementing RegionEnd for blocks with no terminator.
446       if (RegionEnd != MBB->end() ||
447           isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
448         --RegionEnd;
449         // Count the boundary instruction.
450         --RemainingInstrs;
451       }
452 
453       // The next region starts above the previous region. Look backward in the
454       // instruction stream until we find the nearest boundary.
455       unsigned NumRegionInstrs = 0;
456       MachineBasicBlock::iterator I = RegionEnd;
457       for(;I != MBB->begin(); --I, --RemainingInstrs) {
458         if (isSchedBoundary(&*std::prev(I), &*MBB, MF, TII))
459           break;
460         if (!I->isDebugValue())
461           ++NumRegionInstrs;
462       }
463       // Notify the scheduler of the region, even if we may skip scheduling
464       // it. Perhaps it still needs to be bundled.
465       Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);
466 
467       // Skip empty scheduling regions (0 or 1 schedulable instructions).
468       if (I == RegionEnd || I == std::prev(RegionEnd)) {
469         // Close the current region. Bundle the terminator if needed.
470         // This invalidates 'RegionEnd' and 'I'.
471         Scheduler.exitRegion();
472         continue;
473       }
474       DEBUG(dbgs() << "********** MI Scheduling **********\n");
475       DEBUG(dbgs() << MF->getName()
476             << ":BB#" << MBB->getNumber() << " " << MBB->getName()
477             << "\n  From: " << *I << "    To: ";
478             if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
479             else dbgs() << "End";
480             dbgs() << " RegionInstrs: " << NumRegionInstrs
481             << " Remaining: " << RemainingInstrs << "\n");
482       if (DumpCriticalPathLength) {
483         errs() << MF->getName();
484         errs() << ":BB# " << MBB->getNumber();
485         errs() << " " << MBB->getName() << " \n";
486       }
487 
488       // Schedule a region: possibly reorder instructions.
489       // This invalidates 'RegionEnd' and 'I'.
490       Scheduler.schedule();
491 
492       // Close the current region.
493       Scheduler.exitRegion();
494 
495       // Scheduling has invalidated the current iterator 'I'. Ask the
496       // scheduler for the top of it's scheduled region.
497       RegionEnd = Scheduler.begin();
498     }
499     assert(RemainingInstrs == 0 && "Instruction count mismatch!");
500     Scheduler.finishBlock();
501     // FIXME: Ideally, no further passes should rely on kill flags. However,
502     // thumb2 size reduction is currently an exception, so the PostMIScheduler
503     // needs to do this.
504     if (FixKillFlags)
505         Scheduler.fixupKills(&*MBB);
506   }
507   Scheduler.finalizeSchedule();
508 }
509 
510 void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
511   // unimplemented
512 }
513 
514 LLVM_DUMP_METHOD
515 void ReadyQueue::dump() {
516   dbgs() << "Queue " << Name << ": ";
517   for (unsigned i = 0, e = Queue.size(); i < e; ++i)
518     dbgs() << Queue[i]->NodeNum << " ";
519   dbgs() << "\n";
520 }
521 
522 //===----------------------------------------------------------------------===//
523 // ScheduleDAGMI - Basic machine instruction scheduling. This is
524 // independent of PreRA/PostRA scheduling and involves no extra book-keeping for
525 // virtual registers.
526 // ===----------------------------------------------------------------------===/
527 
528 // Provide a vtable anchor.
529 ScheduleDAGMI::~ScheduleDAGMI() {
530 }
531 
532 bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
533   return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
534 }
535 
536 bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
537   if (SuccSU != &ExitSU) {
538     // Do not use WillCreateCycle, it assumes SD scheduling.
539     // If Pred is reachable from Succ, then the edge creates a cycle.
540     if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
541       return false;
542     Topo.AddPred(SuccSU, PredDep.getSUnit());
543   }
544   SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
545   // Return true regardless of whether a new edge needed to be inserted.
546   return true;
547 }
548 
549 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
550 /// NumPredsLeft reaches zero, release the successor node.
551 ///
552 /// FIXME: Adjust SuccSU height based on MinLatency.
553 void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
554   SUnit *SuccSU = SuccEdge->getSUnit();
555 
556   if (SuccEdge->isWeak()) {
557     --SuccSU->WeakPredsLeft;
558     if (SuccEdge->isCluster())
559       NextClusterSucc = SuccSU;
560     return;
561   }
562 #ifndef NDEBUG
563   if (SuccSU->NumPredsLeft == 0) {
564     dbgs() << "*** Scheduling failed! ***\n";
565     SuccSU->dump(this);
566     dbgs() << " has been released too many times!\n";
567     llvm_unreachable(nullptr);
568   }
569 #endif
570   // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
571   // CurrCycle may have advanced since then.
572   if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
573     SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();
574 
575   --SuccSU->NumPredsLeft;
576   if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
577     SchedImpl->releaseTopNode(SuccSU);
578 }
579 
580 /// releaseSuccessors - Call releaseSucc on each of SU's successors.
581 void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
582   for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
583        I != E; ++I) {
584     releaseSucc(SU, &*I);
585   }
586 }
587 
588 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
589 /// NumSuccsLeft reaches zero, release the predecessor node.
590 ///
591 /// FIXME: Adjust PredSU height based on MinLatency.
592 void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
593   SUnit *PredSU = PredEdge->getSUnit();
594 
595   if (PredEdge->isWeak()) {
596     --PredSU->WeakSuccsLeft;
597     if (PredEdge->isCluster())
598       NextClusterPred = PredSU;
599     return;
600   }
601 #ifndef NDEBUG
602   if (PredSU->NumSuccsLeft == 0) {
603     dbgs() << "*** Scheduling failed! ***\n";
604     PredSU->dump(this);
605     dbgs() << " has been released too many times!\n";
606     llvm_unreachable(nullptr);
607   }
608 #endif
609   // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
610   // CurrCycle may have advanced since then.
611   if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
612     PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();
613 
614   --PredSU->NumSuccsLeft;
615   if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
616     SchedImpl->releaseBottomNode(PredSU);
617 }
618 
619 /// releasePredecessors - Call releasePred on each of SU's predecessors.
620 void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
621   for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
622        I != E; ++I) {
623     releasePred(SU, &*I);
624   }
625 }
626 
627 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
628 /// crossing a scheduling boundary. [begin, end) includes all instructions in
629 /// the region, including the boundary itself and single-instruction regions
630 /// that don't get scheduled.
631 void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
632                                      MachineBasicBlock::iterator begin,
633                                      MachineBasicBlock::iterator end,
634                                      unsigned regioninstrs)
635 {
636   ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
637 
638   SchedImpl->initPolicy(begin, end, regioninstrs);
639 }
640 
641 /// This is normally called from the main scheduler loop but may also be invoked
642 /// by the scheduling strategy to perform additional code motion.
643 void ScheduleDAGMI::moveInstruction(
644   MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
645   // Advance RegionBegin if the first instruction moves down.
646   if (&*RegionBegin == MI)
647     ++RegionBegin;
648 
649   // Update the instruction stream.
650   BB->splice(InsertPos, BB, MI);
651 
652   // Update LiveIntervals
653   if (LIS)
654     LIS->handleMove(*MI, /*UpdateFlags=*/true);
655 
656   // Recede RegionBegin if an instruction moves above the first.
657   if (RegionBegin == InsertPos)
658     RegionBegin = MI;
659 }
660 
661 bool ScheduleDAGMI::checkSchedLimit() {
662 #ifndef NDEBUG
663   if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
664     CurrentTop = CurrentBottom;
665     return false;
666   }
667   ++NumInstrsScheduled;
668 #endif
669   return true;
670 }
671 
672 /// Per-region scheduling driver, called back from
673 /// MachineScheduler::runOnMachineFunction. This is a simplified driver that
674 /// does not consider liveness or register pressure. It is useful for PostRA
675 /// scheduling and potentially other custom schedulers.
676 void ScheduleDAGMI::schedule() {
677   DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
678   DEBUG(SchedImpl->dumpPolicy());
679 
680   // Build the DAG.
681   buildSchedGraph(AA);
682 
683   Topo.InitDAGTopologicalSorting();
684 
685   postprocessDAG();
686 
687   SmallVector<SUnit*, 8> TopRoots, BotRoots;
688   findRootsAndBiasEdges(TopRoots, BotRoots);
689 
690   // Initialize the strategy before modifying the DAG.
691   // This may initialize a DFSResult to be used for queue priority.
692   SchedImpl->initialize(this);
693 
694   DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
695           SUnits[su].dumpAll(this));
696   if (ViewMISchedDAGs) viewGraph();
697 
698   // Initialize ready queues now that the DAG and priority data are finalized.
699   initQueues(TopRoots, BotRoots);
700 
701   bool IsTopNode = false;
702   while (true) {
703     DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
704     SUnit *SU = SchedImpl->pickNode(IsTopNode);
705     if (!SU) break;
706 
707     assert(!SU->isScheduled && "Node already scheduled");
708     if (!checkSchedLimit())
709       break;
710 
711     MachineInstr *MI = SU->getInstr();
712     if (IsTopNode) {
713       assert(SU->isTopReady() && "node still has unscheduled dependencies");
714       if (&*CurrentTop == MI)
715         CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
716       else
717         moveInstruction(MI, CurrentTop);
718     }
719     else {
720       assert(SU->isBottomReady() && "node still has unscheduled dependencies");
721       MachineBasicBlock::iterator priorII =
722         priorNonDebug(CurrentBottom, CurrentTop);
723       if (&*priorII == MI)
724         CurrentBottom = priorII;
725       else {
726         if (&*CurrentTop == MI)
727           CurrentTop = nextIfDebug(++CurrentTop, priorII);
728         moveInstruction(MI, CurrentBottom);
729         CurrentBottom = MI;
730       }
731     }
732     // Notify the scheduling strategy before updating the DAG.
733     // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
734     // runs, it can then use the accurate ReadyCycle time to determine whether
735     // newly released nodes can move to the readyQ.
736     SchedImpl->schedNode(SU, IsTopNode);
737 
738     updateQueues(SU, IsTopNode);
739   }
740   assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
741 
742   placeDebugValues();
743 
744   DEBUG({
745       unsigned BBNum = begin()->getParent()->getNumber();
746       dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
747       dumpSchedule();
748       dbgs() << '\n';
749     });
750 }
751 
752 /// Apply each ScheduleDAGMutation step in order.
753 void ScheduleDAGMI::postprocessDAG() {
754   for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
755     Mutations[i]->apply(this);
756   }
757 }
758 
759 void ScheduleDAGMI::
760 findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
761                       SmallVectorImpl<SUnit*> &BotRoots) {
762   for (std::vector<SUnit>::iterator
763          I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
764     SUnit *SU = &(*I);
765     assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");
766 
767     // Order predecessors so DFSResult follows the critical path.
768     SU->biasCriticalPath();
769 
770     // A SUnit is ready to top schedule if it has no predecessors.
771     if (!I->NumPredsLeft)
772       TopRoots.push_back(SU);
773     // A SUnit is ready to bottom schedule if it has no successors.
774     if (!I->NumSuccsLeft)
775       BotRoots.push_back(SU);
776   }
777   ExitSU.biasCriticalPath();
778 }
779 
780 /// Identify DAG roots and setup scheduler queues.
781 void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
782                                ArrayRef<SUnit*> BotRoots) {
783   NextClusterSucc = nullptr;
784   NextClusterPred = nullptr;
785 
786   // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
787   //
788   // Nodes with unreleased weak edges can still be roots.
789   // Release top roots in forward order.
790   for (SmallVectorImpl<SUnit*>::const_iterator
791          I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
792     SchedImpl->releaseTopNode(*I);
793   }
794   // Release bottom roots in reverse order so the higher priority nodes appear
795   // first. This is more natural and slightly more efficient.
796   for (SmallVectorImpl<SUnit*>::const_reverse_iterator
797          I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
798     SchedImpl->releaseBottomNode(*I);
799   }
800 
801   releaseSuccessors(&EntrySU);
802   releasePredecessors(&ExitSU);
803 
804   SchedImpl->registerRoots();
805 
806   // Advance past initial DebugValues.
807   CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
808   CurrentBottom = RegionEnd;
809 }
810 
811 /// Update scheduler queues after scheduling an instruction.
812 void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
813   // Release dependent instructions for scheduling.
814   if (IsTopNode)
815     releaseSuccessors(SU);
816   else
817     releasePredecessors(SU);
818 
819   SU->isScheduled = true;
820 }
821 
822 /// Reinsert any remaining debug_values, just like the PostRA scheduler.
823 void ScheduleDAGMI::placeDebugValues() {
824   // If first instruction was a DBG_VALUE then put it back.
825   if (FirstDbgValue) {
826     BB->splice(RegionBegin, BB, FirstDbgValue);
827     RegionBegin = FirstDbgValue;
828   }
829 
830   for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
831          DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
832     std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
833     MachineInstr *DbgValue = P.first;
834     MachineBasicBlock::iterator OrigPrevMI = P.second;
835     if (&*RegionBegin == DbgValue)
836       ++RegionBegin;
837     BB->splice(++OrigPrevMI, BB, DbgValue);
838     if (OrigPrevMI == std::prev(RegionEnd))
839       RegionEnd = DbgValue;
840   }
841   DbgValues.clear();
842   FirstDbgValue = nullptr;
843 }
844 
845 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
846 void ScheduleDAGMI::dumpSchedule() const {
847   for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
848     if (SUnit *SU = getSUnit(&(*MI)))
849       SU->dump(this);
850     else
851       dbgs() << "Missing SUnit\n";
852   }
853 }
854 #endif
855 
856 //===----------------------------------------------------------------------===//
857 // ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
858 // preservation.
859 //===----------------------------------------------------------------------===//
860 
861 ScheduleDAGMILive::~ScheduleDAGMILive() {
862   delete DFSResult;
863 }
864 
865 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
866 /// crossing a scheduling boundary. [begin, end) includes all instructions in
867 /// the region, including the boundary itself and single-instruction regions
868 /// that don't get scheduled.
869 void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
870                                 MachineBasicBlock::iterator begin,
871                                 MachineBasicBlock::iterator end,
872                                 unsigned regioninstrs)
873 {
874   // ScheduleDAGMI initializes SchedImpl's per-region policy.
875   ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);
876 
877   // For convenience remember the end of the liveness region.
878   LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);
879 
880   SUPressureDiffs.clear();
881 
882   ShouldTrackPressure = SchedImpl->shouldTrackPressure();
883   ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();
884 
885   if (ShouldTrackLaneMasks) {
886     if (!ShouldTrackPressure)
887       report_fatal_error("ShouldTrackLaneMasks requires ShouldTrackPressure");
888     // Dead subregister defs have no users and therefore no dependencies,
889     // moving them around may cause liveintervals to degrade into multiple
890     // components. Change independent components to have their own vreg to avoid
891     // this.
892     if (!DisconnectedComponentsRenamed)
893       LIS->renameDisconnectedComponents();
894   }
895 }
896 
897 // Setup the register pressure trackers for the top scheduled top and bottom
898 // scheduled regions.
899 void ScheduleDAGMILive::initRegPressure() {
900   TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
901                     ShouldTrackLaneMasks, false);
902   BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
903                     ShouldTrackLaneMasks, false);
904 
905   // Close the RPTracker to finalize live ins.
906   RPTracker.closeRegion();
907 
908   DEBUG(RPTracker.dump());
909 
910   // Initialize the live ins and live outs.
911   TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
912   BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);
913 
914   // Close one end of the tracker so we can call
915   // getMaxUpward/DownwardPressureDelta before advancing across any
916   // instructions. This converts currently live regs into live ins/outs.
917   TopRPTracker.closeTop();
918   BotRPTracker.closeBottom();
919 
920   BotRPTracker.initLiveThru(RPTracker);
921   if (!BotRPTracker.getLiveThru().empty()) {
922     TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
923     DEBUG(dbgs() << "Live Thru: ";
924           dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
925   };
926 
927   // For each live out vreg reduce the pressure change associated with other
928   // uses of the same vreg below the live-out reaching def.
929   updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);
930 
931   // Account for liveness generated by the region boundary.
932   if (LiveRegionEnd != RegionEnd) {
933     SmallVector<RegisterMaskPair, 8> LiveUses;
934     BotRPTracker.recede(&LiveUses);
935     updatePressureDiffs(LiveUses);
936   }
937 
938   DEBUG(
939     dbgs() << "Top Pressure:\n";
940     dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
941     dbgs() << "Bottom Pressure:\n";
942     dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
943   );
944 
945   assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");
946 
947   // Cache the list of excess pressure sets in this region. This will also track
948   // the max pressure in the scheduled code for these sets.
949   RegionCriticalPSets.clear();
950   const std::vector<unsigned> &RegionPressure =
951     RPTracker.getPressure().MaxSetPressure;
952   for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
953     unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
954     if (RegionPressure[i] > Limit) {
955       DEBUG(dbgs() << TRI->getRegPressureSetName(i)
956             << " Limit " << Limit
957             << " Actual " << RegionPressure[i] << "\n");
958       RegionCriticalPSets.push_back(PressureChange(i));
959     }
960   }
961   DEBUG(dbgs() << "Excess PSets: ";
962         for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
963           dbgs() << TRI->getRegPressureSetName(
964             RegionCriticalPSets[i].getPSet()) << " ";
965         dbgs() << "\n");
966 }
967 
968 void ScheduleDAGMILive::
969 updateScheduledPressure(const SUnit *SU,
970                         const std::vector<unsigned> &NewMaxPressure) {
971   const PressureDiff &PDiff = getPressureDiff(SU);
972   unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
973   for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
974        I != E; ++I) {
975     if (!I->isValid())
976       break;
977     unsigned ID = I->getPSet();
978     while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
979       ++CritIdx;
980     if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
981       if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
982           && NewMaxPressure[ID] <= INT16_MAX)
983         RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
984     }
985     unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
986     if (NewMaxPressure[ID] >= Limit - 2) {
987       DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
988             << NewMaxPressure[ID]
989             << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
990             << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
991     }
992   }
993 }
994 
995 /// Update the PressureDiff array for liveness after scheduling this
996 /// instruction.
997 void ScheduleDAGMILive::updatePressureDiffs(
998     ArrayRef<RegisterMaskPair> LiveUses) {
999   for (const RegisterMaskPair &P : LiveUses) {
1000     unsigned Reg = P.RegUnit;
1001     /// FIXME: Currently assuming single-use physregs.
1002     if (!TRI->isVirtualRegister(Reg))
1003       continue;
1004 
1005     if (ShouldTrackLaneMasks) {
1006       // If the register has just become live then other uses won't change
1007       // this fact anymore => decrement pressure.
1008       // If the register has just become dead then other uses make it come
1009       // back to life => increment pressure.
1010       bool Decrement = P.LaneMask != 0;
1011 
1012       for (const VReg2SUnit &V2SU
1013            : make_range(VRegUses.find(Reg), VRegUses.end())) {
1014         SUnit &SU = *V2SU.SU;
1015         if (SU.isScheduled || &SU == &ExitSU)
1016           continue;
1017 
1018         PressureDiff &PDiff = getPressureDiff(&SU);
1019         PDiff.addPressureChange(Reg, Decrement, &MRI);
1020         DEBUG(
1021           dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
1022                  << PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
1023                  << ' ' << *SU.getInstr();
1024           dbgs() << "              to ";
1025           PDiff.dump(*TRI);
1026         );
1027       }
1028     } else {
1029       assert(P.LaneMask != 0);
1030       DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
1031       // This may be called before CurrentBottom has been initialized. However,
1032       // BotRPTracker must have a valid position. We want the value live into the
1033       // instruction or live out of the block, so ask for the previous
1034       // instruction's live-out.
1035       const LiveInterval &LI = LIS->getInterval(Reg);
1036       VNInfo *VNI;
1037       MachineBasicBlock::const_iterator I =
1038         nextIfDebug(BotRPTracker.getPos(), BB->end());
1039       if (I == BB->end())
1040         VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
1041       else {
1042         LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
1043         VNI = LRQ.valueIn();
1044       }
1045       // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
1046       assert(VNI && "No live value at use.");
1047       for (const VReg2SUnit &V2SU
1048            : make_range(VRegUses.find(Reg), VRegUses.end())) {
1049         SUnit *SU = V2SU.SU;
1050         // If this use comes before the reaching def, it cannot be a last use,
1051         // so decrease its pressure change.
1052         if (!SU->isScheduled && SU != &ExitSU) {
1053           LiveQueryResult LRQ =
1054               LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
1055           if (LRQ.valueIn() == VNI) {
1056             PressureDiff &PDiff = getPressureDiff(SU);
1057             PDiff.addPressureChange(Reg, true, &MRI);
1058             DEBUG(
1059               dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
1060                      << *SU->getInstr();
1061               dbgs() << "              to ";
1062               PDiff.dump(*TRI);
1063             );
1064           }
1065         }
1066       }
1067     }
1068   }
1069 }
1070 
1071 /// schedule - Called back from MachineScheduler::runOnMachineFunction
1072 /// after setting up the current scheduling region. [RegionBegin, RegionEnd)
1073 /// only includes instructions that have DAG nodes, not scheduling boundaries.
1074 ///
1075 /// This is a skeletal driver, with all the functionality pushed into helpers,
1076 /// so that it can be easily extended by experimental schedulers. Generally,
1077 /// implementing MachineSchedStrategy should be sufficient to implement a new
1078 /// scheduling algorithm. However, if a scheduler further subclasses
1079 /// ScheduleDAGMILive then it will want to override this virtual method in order
1080 /// to update any specialized state.
1081 void ScheduleDAGMILive::schedule() {
1082   DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
1083   DEBUG(SchedImpl->dumpPolicy());
1084   buildDAGWithRegPressure();
1085 
1086   Topo.InitDAGTopologicalSorting();
1087 
1088   postprocessDAG();
1089 
1090   SmallVector<SUnit*, 8> TopRoots, BotRoots;
1091   findRootsAndBiasEdges(TopRoots, BotRoots);
1092 
1093   // Initialize the strategy before modifying the DAG.
1094   // This may initialize a DFSResult to be used for queue priority.
1095   SchedImpl->initialize(this);
1096 
1097   DEBUG(
1098     for (const SUnit &SU : SUnits) {
1099       SU.dumpAll(this);
1100       if (ShouldTrackPressure) {
1101         dbgs() << "  Pressure Diff      : ";
1102         getPressureDiff(&SU).dump(*TRI);
1103       }
1104       dbgs() << '\n';
1105     }
1106   );
1107   if (ViewMISchedDAGs) viewGraph();
1108 
1109   // Initialize ready queues now that the DAG and priority data are finalized.
1110   initQueues(TopRoots, BotRoots);
1111 
1112   if (ShouldTrackPressure) {
1113     assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
1114     TopRPTracker.setPos(CurrentTop);
1115   }
1116 
1117   bool IsTopNode = false;
1118   while (true) {
1119     DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
1120     SUnit *SU = SchedImpl->pickNode(IsTopNode);
1121     if (!SU) break;
1122 
1123     assert(!SU->isScheduled && "Node already scheduled");
1124     if (!checkSchedLimit())
1125       break;
1126 
1127     scheduleMI(SU, IsTopNode);
1128 
1129     if (DFSResult) {
1130       unsigned SubtreeID = DFSResult->getSubtreeID(SU);
1131       if (!ScheduledTrees.test(SubtreeID)) {
1132         ScheduledTrees.set(SubtreeID);
1133         DFSResult->scheduleTree(SubtreeID);
1134         SchedImpl->scheduleTree(SubtreeID);
1135       }
1136     }
1137 
1138     // Notify the scheduling strategy after updating the DAG.
1139     SchedImpl->schedNode(SU, IsTopNode);
1140 
1141     updateQueues(SU, IsTopNode);
1142   }
1143   assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
1144 
1145   placeDebugValues();
1146 
1147   DEBUG({
1148       unsigned BBNum = begin()->getParent()->getNumber();
1149       dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
1150       dumpSchedule();
1151       dbgs() << '\n';
1152     });
1153 }
1154 
1155 /// Build the DAG and setup three register pressure trackers.
1156 void ScheduleDAGMILive::buildDAGWithRegPressure() {
1157   if (!ShouldTrackPressure) {
1158     RPTracker.reset();
1159     RegionCriticalPSets.clear();
1160     buildSchedGraph(AA);
1161     return;
1162   }
1163 
1164   // Initialize the register pressure tracker used by buildSchedGraph.
1165   RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
1166                  ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);
1167 
1168   // Account for liveness generate by the region boundary.
1169   if (LiveRegionEnd != RegionEnd)
1170     RPTracker.recede();
1171 
1172   // Build the DAG, and compute current register pressure.
1173   buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);
1174 
1175   // Initialize top/bottom trackers after computing region pressure.
1176   initRegPressure();
1177 }
1178 
1179 void ScheduleDAGMILive::computeDFSResult() {
1180   if (!DFSResult)
1181     DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
1182   DFSResult->clear();
1183   ScheduledTrees.clear();
1184   DFSResult->resize(SUnits.size());
1185   DFSResult->compute(SUnits);
1186   ScheduledTrees.resize(DFSResult->getNumSubtrees());
1187 }
1188 
1189 /// Compute the max cyclic critical path through the DAG. The scheduling DAG
1190 /// only provides the critical path for single block loops. To handle loops that
1191 /// span blocks, we could use the vreg path latencies provided by
1192 /// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
1193 /// available for use in the scheduler.
1194 ///
1195 /// The cyclic path estimation identifies a def-use pair that crosses the back
1196 /// edge and considers the depth and height of the nodes. For example, consider
1197 /// the following instruction sequence where each instruction has unit latency
1198 /// and defines an epomymous virtual register:
1199 ///
1200 /// a->b(a,c)->c(b)->d(c)->exit
1201 ///
1202 /// The cyclic critical path is a two cycles: b->c->b
1203 /// The acyclic critical path is four cycles: a->b->c->d->exit
1204 /// LiveOutHeight = height(c) = len(c->d->exit) = 2
1205 /// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
1206 /// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
1207 /// LiveInDepth = depth(b) = len(a->b) = 1
1208 ///
1209 /// LiveOutDepth - LiveInDepth = 3 - 1 = 2
1210 /// LiveInHeight - LiveOutHeight = 4 - 2 = 2
1211 /// CyclicCriticalPath = min(2, 2) = 2
1212 ///
1213 /// This could be relevant to PostRA scheduling, but is currently implemented
1214 /// assuming LiveIntervals.
1215 unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
1216   // This only applies to single block loop.
1217   if (!BB->isSuccessor(BB))
1218     return 0;
1219 
1220   unsigned MaxCyclicLatency = 0;
1221   // Visit each live out vreg def to find def/use pairs that cross iterations.
1222   for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
1223     unsigned Reg = P.RegUnit;
1224     if (!TRI->isVirtualRegister(Reg))
1225         continue;
1226     const LiveInterval &LI = LIS->getInterval(Reg);
1227     const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
1228     if (!DefVNI)
1229       continue;
1230 
1231     MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
1232     const SUnit *DefSU = getSUnit(DefMI);
1233     if (!DefSU)
1234       continue;
1235 
1236     unsigned LiveOutHeight = DefSU->getHeight();
1237     unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
1238     // Visit all local users of the vreg def.
1239     for (const VReg2SUnit &V2SU
1240          : make_range(VRegUses.find(Reg), VRegUses.end())) {
1241       SUnit *SU = V2SU.SU;
1242       if (SU == &ExitSU)
1243         continue;
1244 
1245       // Only consider uses of the phi.
1246       LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
1247       if (!LRQ.valueIn()->isPHIDef())
1248         continue;
1249 
1250       // Assume that a path spanning two iterations is a cycle, which could
1251       // overestimate in strange cases. This allows cyclic latency to be
1252       // estimated as the minimum slack of the vreg's depth or height.
1253       unsigned CyclicLatency = 0;
1254       if (LiveOutDepth > SU->getDepth())
1255         CyclicLatency = LiveOutDepth - SU->getDepth();
1256 
1257       unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
1258       if (LiveInHeight > LiveOutHeight) {
1259         if (LiveInHeight - LiveOutHeight < CyclicLatency)
1260           CyclicLatency = LiveInHeight - LiveOutHeight;
1261       }
1262       else
1263         CyclicLatency = 0;
1264 
1265       DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
1266             << SU->NodeNum << ") = " << CyclicLatency << "c\n");
1267       if (CyclicLatency > MaxCyclicLatency)
1268         MaxCyclicLatency = CyclicLatency;
1269     }
1270   }
1271   DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
1272   return MaxCyclicLatency;
1273 }
1274 
1275 /// Move an instruction and update register pressure.
1276 void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
1277   // Move the instruction to its new location in the instruction stream.
1278   MachineInstr *MI = SU->getInstr();
1279 
1280   if (IsTopNode) {
1281     assert(SU->isTopReady() && "node still has unscheduled dependencies");
1282     if (&*CurrentTop == MI)
1283       CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
1284     else {
1285       moveInstruction(MI, CurrentTop);
1286       TopRPTracker.setPos(MI);
1287     }
1288 
1289     if (ShouldTrackPressure) {
1290       // Update top scheduled pressure.
1291       RegisterOperands RegOpers;
1292       RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1293       if (ShouldTrackLaneMasks) {
1294         // Adjust liveness and add missing dead+read-undef flags.
1295         SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
1296         RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1297       } else {
1298         // Adjust for missing dead-def flags.
1299         RegOpers.detectDeadDefs(*MI, *LIS);
1300       }
1301 
1302       TopRPTracker.advance(RegOpers);
1303       assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
1304       DEBUG(
1305         dbgs() << "Top Pressure:\n";
1306         dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
1307       );
1308 
1309       updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
1310     }
1311   }
1312   else {
1313     assert(SU->isBottomReady() && "node still has unscheduled dependencies");
1314     MachineBasicBlock::iterator priorII =
1315       priorNonDebug(CurrentBottom, CurrentTop);
1316     if (&*priorII == MI)
1317       CurrentBottom = priorII;
1318     else {
1319       if (&*CurrentTop == MI) {
1320         CurrentTop = nextIfDebug(++CurrentTop, priorII);
1321         TopRPTracker.setPos(CurrentTop);
1322       }
1323       moveInstruction(MI, CurrentBottom);
1324       CurrentBottom = MI;
1325     }
1326     if (ShouldTrackPressure) {
1327       RegisterOperands RegOpers;
1328       RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1329       if (ShouldTrackLaneMasks) {
1330         // Adjust liveness and add missing dead+read-undef flags.
1331         SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
1332         RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1333       } else {
1334         // Adjust for missing dead-def flags.
1335         RegOpers.detectDeadDefs(*MI, *LIS);
1336       }
1337 
1338       BotRPTracker.recedeSkipDebugValues();
1339       SmallVector<RegisterMaskPair, 8> LiveUses;
1340       BotRPTracker.recede(RegOpers, &LiveUses);
1341       assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
1342       DEBUG(
1343         dbgs() << "Bottom Pressure:\n";
1344         dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
1345       );
1346 
1347       updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
1348       updatePressureDiffs(LiveUses);
1349     }
1350   }
1351 }
1352 
1353 //===----------------------------------------------------------------------===//
1354 // BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
1355 //===----------------------------------------------------------------------===//
1356 
1357 namespace {
1358 /// \brief Post-process the DAG to create cluster edges between neighboring
1359 /// loads or between neighboring stores.
1360 class BaseMemOpClusterMutation : public ScheduleDAGMutation {
1361   struct MemOpInfo {
1362     SUnit *SU;
1363     unsigned BaseReg;
1364     int64_t Offset;
1365     MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
1366         : SU(su), BaseReg(reg), Offset(ofs) {}
1367 
1368     bool operator<(const MemOpInfo&RHS) const {
1369       return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
1370     }
1371   };
1372 
1373   const TargetInstrInfo *TII;
1374   const TargetRegisterInfo *TRI;
1375   bool IsLoad;
1376 
1377 public:
1378   BaseMemOpClusterMutation(const TargetInstrInfo *tii,
1379                            const TargetRegisterInfo *tri, bool IsLoad)
1380       : TII(tii), TRI(tri), IsLoad(IsLoad) {}
1381 
1382   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1383 
1384 protected:
1385   void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
1386 };
1387 
1388 class StoreClusterMutation : public BaseMemOpClusterMutation {
1389 public:
1390   StoreClusterMutation(const TargetInstrInfo *tii,
1391                        const TargetRegisterInfo *tri)
1392       : BaseMemOpClusterMutation(tii, tri, false) {}
1393 };
1394 
1395 class LoadClusterMutation : public BaseMemOpClusterMutation {
1396 public:
1397   LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
1398       : BaseMemOpClusterMutation(tii, tri, true) {}
1399 };
1400 } // anonymous
1401 
1402 void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1403     ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
1404   SmallVector<MemOpInfo, 32> MemOpRecords;
1405   for (unsigned Idx = 0, End = MemOps.size(); Idx != End; ++Idx) {
1406     SUnit *SU = MemOps[Idx];
1407     unsigned BaseReg;
1408     int64_t Offset;
1409     if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
1410       MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
1411   }
1412   if (MemOpRecords.size() < 2)
1413     return;
1414 
1415   std::sort(MemOpRecords.begin(), MemOpRecords.end());
1416   unsigned ClusterLength = 1;
1417   for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
1418     if (MemOpRecords[Idx].BaseReg != MemOpRecords[Idx+1].BaseReg) {
1419       ClusterLength = 1;
1420       continue;
1421     }
1422 
1423     SUnit *SUa = MemOpRecords[Idx].SU;
1424     SUnit *SUb = MemOpRecords[Idx+1].SU;
1425     if (TII->shouldClusterMemOps(SUa->getInstr(), SUb->getInstr(), ClusterLength)
1426         && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
1427       DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
1428             << SUb->NodeNum << ")\n");
1429       // Copy successor edges from SUa to SUb. Interleaving computation
1430       // dependent on SUa can prevent load combining due to register reuse.
1431       // Predecessor edges do not need to be copied from SUb to SUa since nearby
1432       // loads should have effectively the same inputs.
1433       for (SUnit::const_succ_iterator
1434              SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
1435         if (SI->getSUnit() == SUb)
1436           continue;
1437         DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
1438         DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
1439       }
1440       ++ClusterLength;
1441     }
1442     else
1443       ClusterLength = 1;
1444   }
1445 }
1446 
1447 /// \brief Callback from DAG postProcessing to create cluster edges for loads.
1448 void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
1449 
1450   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1451 
1452   // Map DAG NodeNum to store chain ID.
1453   DenseMap<unsigned, unsigned> StoreChainIDs;
1454   // Map each store chain to a set of dependent MemOps.
1455   SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
1456   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1457     SUnit *SU = &DAG->SUnits[Idx];
1458     if ((IsLoad && !SU->getInstr()->mayLoad()) ||
1459         (!IsLoad && !SU->getInstr()->mayStore()))
1460       continue;
1461 
1462     unsigned ChainPredID = DAG->SUnits.size();
1463     for (SUnit::const_pred_iterator
1464            PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
1465       if (PI->isCtrl()) {
1466         ChainPredID = PI->getSUnit()->NodeNum;
1467         break;
1468       }
1469     }
1470     // Check if this chain-like pred has been seen
1471     // before. ChainPredID==MaxNodeID at the top of the schedule.
1472     unsigned NumChains = StoreChainDependents.size();
1473     std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
1474       StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
1475     if (Result.second)
1476       StoreChainDependents.resize(NumChains + 1);
1477     StoreChainDependents[Result.first->second].push_back(SU);
1478   }
1479 
1480   // Iterate over the store chains.
1481   for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
1482     clusterNeighboringMemOps(StoreChainDependents[Idx], DAG);
1483 }
1484 
1485 //===----------------------------------------------------------------------===//
1486 // MacroFusion - DAG post-processing to encourage fusion of macro ops.
1487 //===----------------------------------------------------------------------===//
1488 
1489 namespace {
1490 /// \brief Post-process the DAG to create cluster edges between instructions
1491 /// that may be fused by the processor into a single operation.
1492 class MacroFusion : public ScheduleDAGMutation {
1493   const TargetInstrInfo &TII;
1494   const TargetRegisterInfo &TRI;
1495 public:
1496   MacroFusion(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI)
1497     : TII(TII), TRI(TRI) {}
1498 
1499   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1500 };
1501 } // anonymous
1502 
1503 /// Returns true if \p MI reads a register written by \p Other.
1504 static bool HasDataDep(const TargetRegisterInfo &TRI, const MachineInstr &MI,
1505                        const MachineInstr &Other) {
1506   for (const MachineOperand &MO : MI.uses()) {
1507     if (!MO.isReg() || !MO.readsReg())
1508       continue;
1509 
1510     unsigned Reg = MO.getReg();
1511     if (Other.modifiesRegister(Reg, &TRI))
1512       return true;
1513   }
1514   return false;
1515 }
1516 
1517 /// \brief Callback from DAG postProcessing to create cluster edges to encourage
1518 /// fused operations.
1519 void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) {
1520   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1521 
1522   // For now, assume targets can only fuse with the branch.
1523   SUnit &ExitSU = DAG->ExitSU;
1524   MachineInstr *Branch = ExitSU.getInstr();
1525   if (!Branch)
1526     return;
1527 
1528   for (SUnit &SU : DAG->SUnits) {
1529     // SUnits with successors can't be schedule in front of the ExitSU.
1530     if (!SU.Succs.empty())
1531       continue;
1532     // We only care if the node writes to a register that the branch reads.
1533     MachineInstr *Pred = SU.getInstr();
1534     if (!HasDataDep(TRI, *Branch, *Pred))
1535       continue;
1536 
1537     if (!TII.shouldScheduleAdjacent(Pred, Branch))
1538       continue;
1539 
1540     // Create a single weak edge from SU to ExitSU. The only effect is to cause
1541     // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
1542     // need to copy predecessor edges from ExitSU to SU, since top-down
1543     // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
1544     // of SU, we could create an artificial edge from the deepest root, but it
1545     // hasn't been needed yet.
1546     bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
1547     (void)Success;
1548     assert(Success && "No DAG nodes should be reachable from ExitSU");
1549 
1550     DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
1551     break;
1552   }
1553 }
1554 
1555 //===----------------------------------------------------------------------===//
1556 // CopyConstrain - DAG post-processing to encourage copy elimination.
1557 //===----------------------------------------------------------------------===//
1558 
1559 namespace {
1560 /// \brief Post-process the DAG to create weak edges from all uses of a copy to
1561 /// the one use that defines the copy's source vreg, most likely an induction
1562 /// variable increment.
1563 class CopyConstrain : public ScheduleDAGMutation {
1564   // Transient state.
1565   SlotIndex RegionBeginIdx;
1566   // RegionEndIdx is the slot index of the last non-debug instruction in the
1567   // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1568   SlotIndex RegionEndIdx;
1569 public:
1570   CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1571 
1572   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1573 
1574 protected:
1575   void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1576 };
1577 } // anonymous
1578 
1579 /// constrainLocalCopy handles two possibilities:
1580 /// 1) Local src:
1581 /// I0:     = dst
1582 /// I1: src = ...
1583 /// I2:     = dst
1584 /// I3: dst = src (copy)
1585 /// (create pred->succ edges I0->I1, I2->I1)
1586 ///
1587 /// 2) Local copy:
1588 /// I0: dst = src (copy)
1589 /// I1:     = dst
1590 /// I2: src = ...
1591 /// I3:     = dst
1592 /// (create pred->succ edges I1->I2, I3->I2)
1593 ///
1594 /// Although the MachineScheduler is currently constrained to single blocks,
1595 /// this algorithm should handle extended blocks. An EBB is a set of
1596 /// contiguously numbered blocks such that the previous block in the EBB is
1597 /// always the single predecessor.
1598 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
1599   LiveIntervals *LIS = DAG->getLIS();
1600   MachineInstr *Copy = CopySU->getInstr();
1601 
1602   // Check for pure vreg copies.
1603   const MachineOperand &SrcOp = Copy->getOperand(1);
1604   unsigned SrcReg = SrcOp.getReg();
1605   if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
1606     return;
1607 
1608   const MachineOperand &DstOp = Copy->getOperand(0);
1609   unsigned DstReg = DstOp.getReg();
1610   if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
1611     return;
1612 
1613   // Check if either the dest or source is local. If it's live across a back
1614   // edge, it's not local. Note that if both vregs are live across the back
1615   // edge, we cannot successfully contrain the copy without cyclic scheduling.
1616   // If both the copy's source and dest are local live intervals, then we
1617   // should treat the dest as the global for the purpose of adding
1618   // constraints. This adds edges from source's other uses to the copy.
1619   unsigned LocalReg = SrcReg;
1620   unsigned GlobalReg = DstReg;
1621   LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1622   if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
1623     LocalReg = DstReg;
1624     GlobalReg = SrcReg;
1625     LocalLI = &LIS->getInterval(LocalReg);
1626     if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1627       return;
1628   }
1629   LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1630 
1631   // Find the global segment after the start of the local LI.
1632   LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1633   // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1634   // local live range. We could create edges from other global uses to the local
1635   // start, but the coalescer should have already eliminated these cases, so
1636   // don't bother dealing with it.
1637   if (GlobalSegment == GlobalLI->end())
1638     return;
1639 
1640   // If GlobalSegment is killed at the LocalLI->start, the call to find()
1641   // returned the next global segment. But if GlobalSegment overlaps with
1642   // LocalLI->start, then advance to the next segement. If a hole in GlobalLI
1643   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1644   if (GlobalSegment->contains(LocalLI->beginIndex()))
1645     ++GlobalSegment;
1646 
1647   if (GlobalSegment == GlobalLI->end())
1648     return;
1649 
1650   // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1651   if (GlobalSegment != GlobalLI->begin()) {
1652     // Two address defs have no hole.
1653     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
1654                                GlobalSegment->start)) {
1655       return;
1656     }
1657     // If the prior global segment may be defined by the same two-address
1658     // instruction that also defines LocalLI, then can't make a hole here.
1659     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
1660                                LocalLI->beginIndex())) {
1661       return;
1662     }
1663     // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1664     // it would be a disconnected component in the live range.
1665     assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
1666            "Disconnected LRG within the scheduling region.");
1667   }
1668   MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1669   if (!GlobalDef)
1670     return;
1671 
1672   SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1673   if (!GlobalSU)
1674     return;
1675 
1676   // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1677   // constraining the uses of the last local def to precede GlobalDef.
1678   SmallVector<SUnit*,8> LocalUses;
1679   const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1680   MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1681   SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1682   for (SUnit::const_succ_iterator
1683          I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
1684        I != E; ++I) {
1685     if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
1686       continue;
1687     if (I->getSUnit() == GlobalSU)
1688       continue;
1689     if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
1690       return;
1691     LocalUses.push_back(I->getSUnit());
1692   }
1693   // Open the top of the GlobalLI hole by constraining any earlier global uses
1694   // to precede the start of LocalLI.
1695   SmallVector<SUnit*,8> GlobalUses;
1696   MachineInstr *FirstLocalDef =
1697     LIS->getInstructionFromIndex(LocalLI->beginIndex());
1698   SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1699   for (SUnit::const_pred_iterator
1700          I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
1701     if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
1702       continue;
1703     if (I->getSUnit() == FirstLocalSU)
1704       continue;
1705     if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
1706       return;
1707     GlobalUses.push_back(I->getSUnit());
1708   }
1709   DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1710   // Add the weak edges.
1711   for (SmallVectorImpl<SUnit*>::const_iterator
1712          I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1713     DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
1714           << GlobalSU->NodeNum << ")\n");
1715     DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1716   }
1717   for (SmallVectorImpl<SUnit*>::const_iterator
1718          I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1719     DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
1720           << FirstLocalSU->NodeNum << ")\n");
1721     DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1722   }
1723 }
1724 
1725 /// \brief Callback from DAG postProcessing to create weak edges to encourage
1726 /// copy elimination.
1727 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
1728   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1729   assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1730 
1731   MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1732   if (FirstPos == DAG->end())
1733     return;
1734   RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
1735   RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1736       *priorNonDebug(DAG->end(), DAG->begin()));
1737 
1738   for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1739     SUnit *SU = &DAG->SUnits[Idx];
1740     if (!SU->getInstr()->isCopy())
1741       continue;
1742 
1743     constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
1744   }
1745 }
1746 
1747 //===----------------------------------------------------------------------===//
1748 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1749 // and possibly other custom schedulers.
1750 //===----------------------------------------------------------------------===//
1751 
1752 static const unsigned InvalidCycle = ~0U;
1753 
1754 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1755 
1756 void SchedBoundary::reset() {
1757   // A new HazardRec is created for each DAG and owned by SchedBoundary.
1758   // Destroying and reconstructing it is very expensive though. So keep
1759   // invalid, placeholder HazardRecs.
1760   if (HazardRec && HazardRec->isEnabled()) {
1761     delete HazardRec;
1762     HazardRec = nullptr;
1763   }
1764   Available.clear();
1765   Pending.clear();
1766   CheckPending = false;
1767   NextSUs.clear();
1768   CurrCycle = 0;
1769   CurrMOps = 0;
1770   MinReadyCycle = UINT_MAX;
1771   ExpectedLatency = 0;
1772   DependentLatency = 0;
1773   RetiredMOps = 0;
1774   MaxExecutedResCount = 0;
1775   ZoneCritResIdx = 0;
1776   IsResourceLimited = false;
1777   ReservedCycles.clear();
1778 #ifndef NDEBUG
1779   // Track the maximum number of stall cycles that could arise either from the
1780   // latency of a DAG edge or the number of cycles that a processor resource is
1781   // reserved (SchedBoundary::ReservedCycles).
1782   MaxObservedStall = 0;
1783 #endif
1784   // Reserve a zero-count for invalid CritResIdx.
1785   ExecutedResCounts.resize(1);
1786   assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1787 }
1788 
1789 void SchedRemainder::
1790 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1791   reset();
1792   if (!SchedModel->hasInstrSchedModel())
1793     return;
1794   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1795   for (std::vector<SUnit>::iterator
1796          I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1797     const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
1798     RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1799       * SchedModel->getMicroOpFactor();
1800     for (TargetSchedModel::ProcResIter
1801            PI = SchedModel->getWriteProcResBegin(SC),
1802            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1803       unsigned PIdx = PI->ProcResourceIdx;
1804       unsigned Factor = SchedModel->getResourceFactor(PIdx);
1805       RemainingCounts[PIdx] += (Factor * PI->Cycles);
1806     }
1807   }
1808 }
1809 
1810 void SchedBoundary::
1811 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1812   reset();
1813   DAG = dag;
1814   SchedModel = smodel;
1815   Rem = rem;
1816   if (SchedModel->hasInstrSchedModel()) {
1817     ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1818     ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1819   }
1820 }
1821 
1822 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1823 /// these "soft stalls" differently than the hard stall cycles based on CPU
1824 /// resources and computed by checkHazard(). A fully in-order model
1825 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1826 /// available for scheduling until they are ready. However, a weaker in-order
1827 /// model may use this for heuristics. For example, if a processor has in-order
1828 /// behavior when reading certain resources, this may come into play.
1829 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1830   if (!SU->isUnbuffered)
1831     return 0;
1832 
1833   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1834   if (ReadyCycle > CurrCycle)
1835     return ReadyCycle - CurrCycle;
1836   return 0;
1837 }
1838 
1839 /// Compute the next cycle at which the given processor resource can be
1840 /// scheduled.
1841 unsigned SchedBoundary::
1842 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1843   unsigned NextUnreserved = ReservedCycles[PIdx];
1844   // If this resource has never been used, always return cycle zero.
1845   if (NextUnreserved == InvalidCycle)
1846     return 0;
1847   // For bottom-up scheduling add the cycles needed for the current operation.
1848   if (!isTop())
1849     NextUnreserved += Cycles;
1850   return NextUnreserved;
1851 }
1852 
1853 /// Does this SU have a hazard within the current instruction group.
1854 ///
1855 /// The scheduler supports two modes of hazard recognition. The first is the
1856 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1857 /// supports highly complicated in-order reservation tables
1858 /// (ScoreboardHazardRecognizer) and arbitraty target-specific logic.
1859 ///
1860 /// The second is a streamlined mechanism that checks for hazards based on
1861 /// simple counters that the scheduler itself maintains. It explicitly checks
1862 /// for instruction dispatch limitations, including the number of micro-ops that
1863 /// can dispatch per cycle.
1864 ///
1865 /// TODO: Also check whether the SU must start a new group.
1866 bool SchedBoundary::checkHazard(SUnit *SU) {
1867   if (HazardRec->isEnabled()
1868       && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1869     return true;
1870   }
1871   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1872   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1873     DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1874           << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1875     return true;
1876   }
1877   if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1878     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1879     for (TargetSchedModel::ProcResIter
1880            PI = SchedModel->getWriteProcResBegin(SC),
1881            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1882       unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
1883       if (NRCycle > CurrCycle) {
1884 #ifndef NDEBUG
1885         MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
1886 #endif
1887         DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
1888               << SchedModel->getResourceName(PI->ProcResourceIdx)
1889               << "=" << NRCycle << "c\n");
1890         return true;
1891       }
1892     }
1893   }
1894   return false;
1895 }
1896 
1897 // Find the unscheduled node in ReadySUs with the highest latency.
1898 unsigned SchedBoundary::
1899 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1900   SUnit *LateSU = nullptr;
1901   unsigned RemLatency = 0;
1902   for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1903        I != E; ++I) {
1904     unsigned L = getUnscheduledLatency(*I);
1905     if (L > RemLatency) {
1906       RemLatency = L;
1907       LateSU = *I;
1908     }
1909   }
1910   if (LateSU) {
1911     DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1912           << LateSU->NodeNum << ") " << RemLatency << "c\n");
1913   }
1914   return RemLatency;
1915 }
1916 
1917 // Count resources in this zone and the remaining unscheduled
1918 // instruction. Return the max count, scaled. Set OtherCritIdx to the critical
1919 // resource index, or zero if the zone is issue limited.
1920 unsigned SchedBoundary::
1921 getOtherResourceCount(unsigned &OtherCritIdx) {
1922   OtherCritIdx = 0;
1923   if (!SchedModel->hasInstrSchedModel())
1924     return 0;
1925 
1926   unsigned OtherCritCount = Rem->RemIssueCount
1927     + (RetiredMOps * SchedModel->getMicroOpFactor());
1928   DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
1929         << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1930   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1931        PIdx != PEnd; ++PIdx) {
1932     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1933     if (OtherCount > OtherCritCount) {
1934       OtherCritCount = OtherCount;
1935       OtherCritIdx = PIdx;
1936     }
1937   }
1938   if (OtherCritIdx) {
1939     DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
1940           << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1941           << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
1942   }
1943   return OtherCritCount;
1944 }
1945 
1946 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
1947   assert(SU->getInstr() && "Scheduled SUnit must have instr");
1948 
1949 #ifndef NDEBUG
1950   // ReadyCycle was been bumped up to the CurrCycle when this node was
1951   // scheduled, but CurrCycle may have been eagerly advanced immediately after
1952   // scheduling, so may now be greater than ReadyCycle.
1953   if (ReadyCycle > CurrCycle)
1954     MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
1955 #endif
1956 
1957   if (ReadyCycle < MinReadyCycle)
1958     MinReadyCycle = ReadyCycle;
1959 
1960   // Check for interlocks first. For the purpose of other heuristics, an
1961   // instruction that cannot issue appears as if it's not in the ReadyQueue.
1962   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1963   if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
1964     Pending.push(SU);
1965   else
1966     Available.push(SU);
1967 
1968   // Record this node as an immediate dependent of the scheduled node.
1969   NextSUs.insert(SU);
1970 }
1971 
1972 void SchedBoundary::releaseTopNode(SUnit *SU) {
1973   if (SU->isScheduled)
1974     return;
1975 
1976   releaseNode(SU, SU->TopReadyCycle);
1977 }
1978 
1979 void SchedBoundary::releaseBottomNode(SUnit *SU) {
1980   if (SU->isScheduled)
1981     return;
1982 
1983   releaseNode(SU, SU->BotReadyCycle);
1984 }
1985 
1986 /// Move the boundary of scheduled code by one cycle.
1987 void SchedBoundary::bumpCycle(unsigned NextCycle) {
1988   if (SchedModel->getMicroOpBufferSize() == 0) {
1989     assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1990     if (MinReadyCycle > NextCycle)
1991       NextCycle = MinReadyCycle;
1992   }
1993   // Update the current micro-ops, which will issue in the next cycle.
1994   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
1995   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
1996 
1997   // Decrement DependentLatency based on the next cycle.
1998   if ((NextCycle - CurrCycle) > DependentLatency)
1999     DependentLatency = 0;
2000   else
2001     DependentLatency -= (NextCycle - CurrCycle);
2002 
2003   if (!HazardRec->isEnabled()) {
2004     // Bypass HazardRec virtual calls.
2005     CurrCycle = NextCycle;
2006   }
2007   else {
2008     // Bypass getHazardType calls in case of long latency.
2009     for (; CurrCycle != NextCycle; ++CurrCycle) {
2010       if (isTop())
2011         HazardRec->AdvanceCycle();
2012       else
2013         HazardRec->RecedeCycle();
2014     }
2015   }
2016   CheckPending = true;
2017   unsigned LFactor = SchedModel->getLatencyFactor();
2018   IsResourceLimited =
2019     (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2020     > (int)LFactor;
2021 
2022   DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
2023 }
2024 
2025 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
2026   ExecutedResCounts[PIdx] += Count;
2027   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2028     MaxExecutedResCount = ExecutedResCounts[PIdx];
2029 }
2030 
2031 /// Add the given processor resource to this scheduled zone.
2032 ///
2033 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
2034 /// during which this resource is consumed.
2035 ///
2036 /// \return the next cycle at which the instruction may execute without
2037 /// oversubscribing resources.
2038 unsigned SchedBoundary::
2039 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
2040   unsigned Factor = SchedModel->getResourceFactor(PIdx);
2041   unsigned Count = Factor * Cycles;
2042   DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx)
2043         << " +" << Cycles << "x" << Factor << "u\n");
2044 
2045   // Update Executed resources counts.
2046   incExecutedResources(PIdx, Count);
2047   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2048   Rem->RemainingCounts[PIdx] -= Count;
2049 
2050   // Check if this resource exceeds the current critical resource. If so, it
2051   // becomes the critical resource.
2052   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2053     ZoneCritResIdx = PIdx;
2054     DEBUG(dbgs() << "  *** Critical resource "
2055           << SchedModel->getResourceName(PIdx) << ": "
2056           << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
2057   }
2058   // For reserved resources, record the highest cycle using the resource.
2059   unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
2060   if (NextAvailable > CurrCycle) {
2061     DEBUG(dbgs() << "  Resource conflict: "
2062           << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
2063           << NextAvailable << "\n");
2064   }
2065   return NextAvailable;
2066 }
2067 
2068 /// Move the boundary of scheduled code by one SUnit.
2069 void SchedBoundary::bumpNode(SUnit *SU) {
2070   // Update the reservation table.
2071   if (HazardRec->isEnabled()) {
2072     if (!isTop() && SU->isCall) {
2073       // Calls are scheduled with their preceding instructions. For bottom-up
2074       // scheduling, clear the pipeline state before emitting.
2075       HazardRec->Reset();
2076     }
2077     HazardRec->EmitInstruction(SU);
2078   }
2079   // checkHazard should prevent scheduling multiple instructions per cycle that
2080   // exceed the issue width.
2081   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2082   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2083   assert(
2084       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2085       "Cannot schedule this instruction's MicroOps in the current cycle.");
2086 
2087   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2088   DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
2089 
2090   unsigned NextCycle = CurrCycle;
2091   switch (SchedModel->getMicroOpBufferSize()) {
2092   case 0:
2093     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2094     break;
2095   case 1:
2096     if (ReadyCycle > NextCycle) {
2097       NextCycle = ReadyCycle;
2098       DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
2099     }
2100     break;
2101   default:
2102     // We don't currently model the OOO reorder buffer, so consider all
2103     // scheduled MOps to be "retired". We do loosely model in-order resource
2104     // latency. If this instruction uses an in-order resource, account for any
2105     // likely stall cycles.
2106     if (SU->isUnbuffered && ReadyCycle > NextCycle)
2107       NextCycle = ReadyCycle;
2108     break;
2109   }
2110   RetiredMOps += IncMOps;
2111 
2112   // Update resource counts and critical resource.
2113   if (SchedModel->hasInstrSchedModel()) {
2114     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2115     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2116     Rem->RemIssueCount -= DecRemIssue;
2117     if (ZoneCritResIdx) {
2118       // Scale scheduled micro-ops for comparing with the critical resource.
2119       unsigned ScaledMOps =
2120         RetiredMOps * SchedModel->getMicroOpFactor();
2121 
2122       // If scaled micro-ops are now more than the previous critical resource by
2123       // a full cycle, then micro-ops issue becomes critical.
2124       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2125           >= (int)SchedModel->getLatencyFactor()) {
2126         ZoneCritResIdx = 0;
2127         DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
2128               << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
2129       }
2130     }
2131     for (TargetSchedModel::ProcResIter
2132            PI = SchedModel->getWriteProcResBegin(SC),
2133            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2134       unsigned RCycle =
2135         countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
2136       if (RCycle > NextCycle)
2137         NextCycle = RCycle;
2138     }
2139     if (SU->hasReservedResource) {
2140       // For reserved resources, record the highest cycle using the resource.
2141       // For top-down scheduling, this is the cycle in which we schedule this
2142       // instruction plus the number of cycles the operations reserves the
2143       // resource. For bottom-up is it simply the instruction's cycle.
2144       for (TargetSchedModel::ProcResIter
2145              PI = SchedModel->getWriteProcResBegin(SC),
2146              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2147         unsigned PIdx = PI->ProcResourceIdx;
2148         if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2149           if (isTop()) {
2150             ReservedCycles[PIdx] =
2151               std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
2152           }
2153           else
2154             ReservedCycles[PIdx] = NextCycle;
2155         }
2156       }
2157     }
2158   }
2159   // Update ExpectedLatency and DependentLatency.
2160   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2161   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2162   if (SU->getDepth() > TopLatency) {
2163     TopLatency = SU->getDepth();
2164     DEBUG(dbgs() << "  " << Available.getName()
2165           << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
2166   }
2167   if (SU->getHeight() > BotLatency) {
2168     BotLatency = SU->getHeight();
2169     DEBUG(dbgs() << "  " << Available.getName()
2170           << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
2171   }
2172   // If we stall for any reason, bump the cycle.
2173   if (NextCycle > CurrCycle) {
2174     bumpCycle(NextCycle);
2175   }
2176   else {
2177     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2178     // resource limited. If a stall occurred, bumpCycle does this.
2179     unsigned LFactor = SchedModel->getLatencyFactor();
2180     IsResourceLimited =
2181       (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2182       > (int)LFactor;
2183   }
2184   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2185   // resets CurrMOps. Loop to handle instructions with more MOps than issue in
2186   // one cycle.  Since we commonly reach the max MOps here, opportunistically
2187   // bump the cycle to avoid uselessly checking everything in the readyQ.
2188   CurrMOps += IncMOps;
2189   while (CurrMOps >= SchedModel->getIssueWidth()) {
2190     DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
2191           << " at cycle " << CurrCycle << '\n');
2192     bumpCycle(++NextCycle);
2193   }
2194   DEBUG(dumpScheduledState());
2195 }
2196 
2197 /// Release pending ready nodes in to the available queue. This makes them
2198 /// visible to heuristics.
2199 void SchedBoundary::releasePending() {
2200   // If the available queue is empty, it is safe to reset MinReadyCycle.
2201   if (Available.empty())
2202     MinReadyCycle = UINT_MAX;
2203 
2204   // Check to see if any of the pending instructions are ready to issue.  If
2205   // so, add them to the available queue.
2206   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2207   for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2208     SUnit *SU = *(Pending.begin()+i);
2209     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2210 
2211     if (ReadyCycle < MinReadyCycle)
2212       MinReadyCycle = ReadyCycle;
2213 
2214     if (!IsBuffered && ReadyCycle > CurrCycle)
2215       continue;
2216 
2217     if (checkHazard(SU))
2218       continue;
2219 
2220     Available.push(SU);
2221     Pending.remove(Pending.begin()+i);
2222     --i; --e;
2223   }
2224   DEBUG(if (!Pending.empty()) Pending.dump());
2225   CheckPending = false;
2226 }
2227 
2228 /// Remove SU from the ready set for this boundary.
2229 void SchedBoundary::removeReady(SUnit *SU) {
2230   if (Available.isInQueue(SU))
2231     Available.remove(Available.find(SU));
2232   else {
2233     assert(Pending.isInQueue(SU) && "bad ready count");
2234     Pending.remove(Pending.find(SU));
2235   }
2236 }
2237 
2238 /// If this queue only has one ready candidate, return it. As a side effect,
2239 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2240 /// one node is ready. If multiple instructions are ready, return NULL.
2241 SUnit *SchedBoundary::pickOnlyChoice() {
2242   if (CheckPending)
2243     releasePending();
2244 
2245   if (CurrMOps > 0) {
2246     // Defer any ready instrs that now have a hazard.
2247     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2248       if (checkHazard(*I)) {
2249         Pending.push(*I);
2250         I = Available.remove(I);
2251         continue;
2252       }
2253       ++I;
2254     }
2255   }
2256   for (unsigned i = 0; Available.empty(); ++i) {
2257 //  FIXME: Re-enable assert once PR20057 is resolved.
2258 //    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2259 //           "permanent hazard");
2260     (void)i;
2261     bumpCycle(CurrCycle + 1);
2262     releasePending();
2263   }
2264   if (Available.size() == 1)
2265     return *Available.begin();
2266   return nullptr;
2267 }
2268 
2269 #ifndef NDEBUG
2270 // This is useful information to dump after bumpNode.
2271 // Note that the Queue contents are more useful before pickNodeFromQueue.
2272 void SchedBoundary::dumpScheduledState() {
2273   unsigned ResFactor;
2274   unsigned ResCount;
2275   if (ZoneCritResIdx) {
2276     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2277     ResCount = getResourceCount(ZoneCritResIdx);
2278   }
2279   else {
2280     ResFactor = SchedModel->getMicroOpFactor();
2281     ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
2282   }
2283   unsigned LFactor = SchedModel->getLatencyFactor();
2284   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2285          << "  Retired: " << RetiredMOps;
2286   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
2287   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
2288          << ResCount / ResFactor << " "
2289          << SchedModel->getResourceName(ZoneCritResIdx)
2290          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
2291          << (IsResourceLimited ? "  - Resource" : "  - Latency")
2292          << " limited.\n";
2293 }
2294 #endif
2295 
2296 //===----------------------------------------------------------------------===//
2297 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2298 //===----------------------------------------------------------------------===//
2299 
2300 void GenericSchedulerBase::SchedCandidate::
2301 initResourceDelta(const ScheduleDAGMI *DAG,
2302                   const TargetSchedModel *SchedModel) {
2303   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2304     return;
2305 
2306   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2307   for (TargetSchedModel::ProcResIter
2308          PI = SchedModel->getWriteProcResBegin(SC),
2309          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2310     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2311       ResDelta.CritResources += PI->Cycles;
2312     if (PI->ProcResourceIdx == Policy.DemandResIdx)
2313       ResDelta.DemandedResources += PI->Cycles;
2314   }
2315 }
2316 
2317 /// Set the CandPolicy given a scheduling zone given the current resources and
2318 /// latencies inside and outside the zone.
2319 void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
2320                                      bool IsPostRA,
2321                                      SchedBoundary &CurrZone,
2322                                      SchedBoundary *OtherZone) {
2323   // Apply preemptive heuristics based on the total latency and resources
2324   // inside and outside this zone. Potential stalls should be considered before
2325   // following this policy.
2326 
2327   // Compute remaining latency. We need this both to determine whether the
2328   // overall schedule has become latency-limited and whether the instructions
2329   // outside this zone are resource or latency limited.
2330   //
2331   // The "dependent" latency is updated incrementally during scheduling as the
2332   // max height/depth of scheduled nodes minus the cycles since it was
2333   // scheduled:
2334   //   DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone
2335   //
2336   // The "independent" latency is the max ready queue depth:
2337   //   ILat = max N.depth for N in Available|Pending
2338   //
2339   // RemainingLatency is the greater of independent and dependent latency.
2340   unsigned RemLatency = CurrZone.getDependentLatency();
2341   RemLatency = std::max(RemLatency,
2342                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2343   RemLatency = std::max(RemLatency,
2344                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2345 
2346   // Compute the critical resource outside the zone.
2347   unsigned OtherCritIdx = 0;
2348   unsigned OtherCount =
2349     OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2350 
2351   bool OtherResLimited = false;
2352   if (SchedModel->hasInstrSchedModel()) {
2353     unsigned LFactor = SchedModel->getLatencyFactor();
2354     OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2355   }
2356   // Schedule aggressively for latency in PostRA mode. We don't check for
2357   // acyclic latency during PostRA, and highly out-of-order processors will
2358   // skip PostRA scheduling.
2359   if (!OtherResLimited) {
2360     if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
2361       Policy.ReduceLatency |= true;
2362       DEBUG(dbgs() << "  " << CurrZone.Available.getName()
2363             << " RemainingLatency " << RemLatency << " + "
2364             << CurrZone.getCurrCycle() << "c > CritPath "
2365             << Rem.CriticalPath << "\n");
2366     }
2367   }
2368   // If the same resource is limiting inside and outside the zone, do nothing.
2369   if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2370     return;
2371 
2372   DEBUG(
2373     if (CurrZone.isResourceLimited()) {
2374       dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
2375              << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
2376              << "\n";
2377     }
2378     if (OtherResLimited)
2379       dbgs() << "  RemainingLimit: "
2380              << SchedModel->getResourceName(OtherCritIdx) << "\n";
2381     if (!CurrZone.isResourceLimited() && !OtherResLimited)
2382       dbgs() << "  Latency limited both directions.\n");
2383 
2384   if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2385     Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2386 
2387   if (OtherResLimited)
2388     Policy.DemandResIdx = OtherCritIdx;
2389 }
2390 
2391 #ifndef NDEBUG
2392 const char *GenericSchedulerBase::getReasonStr(
2393   GenericSchedulerBase::CandReason Reason) {
2394   switch (Reason) {
2395   case NoCand:         return "NOCAND    ";
2396   case PhysRegCopy:    return "PREG-COPY";
2397   case RegExcess:      return "REG-EXCESS";
2398   case RegCritical:    return "REG-CRIT  ";
2399   case Stall:          return "STALL     ";
2400   case Cluster:        return "CLUSTER   ";
2401   case Weak:           return "WEAK      ";
2402   case RegMax:         return "REG-MAX   ";
2403   case ResourceReduce: return "RES-REDUCE";
2404   case ResourceDemand: return "RES-DEMAND";
2405   case TopDepthReduce: return "TOP-DEPTH ";
2406   case TopPathReduce:  return "TOP-PATH  ";
2407   case BotHeightReduce:return "BOT-HEIGHT";
2408   case BotPathReduce:  return "BOT-PATH  ";
2409   case NextDefUse:     return "DEF-USE   ";
2410   case NodeOrder:      return "ORDER     ";
2411   };
2412   llvm_unreachable("Unknown reason!");
2413 }
2414 
2415 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2416   PressureChange P;
2417   unsigned ResIdx = 0;
2418   unsigned Latency = 0;
2419   switch (Cand.Reason) {
2420   default:
2421     break;
2422   case RegExcess:
2423     P = Cand.RPDelta.Excess;
2424     break;
2425   case RegCritical:
2426     P = Cand.RPDelta.CriticalMax;
2427     break;
2428   case RegMax:
2429     P = Cand.RPDelta.CurrentMax;
2430     break;
2431   case ResourceReduce:
2432     ResIdx = Cand.Policy.ReduceResIdx;
2433     break;
2434   case ResourceDemand:
2435     ResIdx = Cand.Policy.DemandResIdx;
2436     break;
2437   case TopDepthReduce:
2438     Latency = Cand.SU->getDepth();
2439     break;
2440   case TopPathReduce:
2441     Latency = Cand.SU->getHeight();
2442     break;
2443   case BotHeightReduce:
2444     Latency = Cand.SU->getHeight();
2445     break;
2446   case BotPathReduce:
2447     Latency = Cand.SU->getDepth();
2448     break;
2449   }
2450   dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2451   if (P.isValid())
2452     dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2453            << ":" << P.getUnitInc() << " ";
2454   else
2455     dbgs() << "      ";
2456   if (ResIdx)
2457     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2458   else
2459     dbgs() << "         ";
2460   if (Latency)
2461     dbgs() << " " << Latency << " cycles ";
2462   else
2463     dbgs() << "          ";
2464   dbgs() << '\n';
2465 }
2466 #endif
2467 
2468 /// Return true if this heuristic determines order.
2469 static bool tryLess(int TryVal, int CandVal,
2470                     GenericSchedulerBase::SchedCandidate &TryCand,
2471                     GenericSchedulerBase::SchedCandidate &Cand,
2472                     GenericSchedulerBase::CandReason Reason) {
2473   if (TryVal < CandVal) {
2474     TryCand.Reason = Reason;
2475     return true;
2476   }
2477   if (TryVal > CandVal) {
2478     if (Cand.Reason > Reason)
2479       Cand.Reason = Reason;
2480     return true;
2481   }
2482   Cand.setRepeat(Reason);
2483   return false;
2484 }
2485 
2486 static bool tryGreater(int TryVal, int CandVal,
2487                        GenericSchedulerBase::SchedCandidate &TryCand,
2488                        GenericSchedulerBase::SchedCandidate &Cand,
2489                        GenericSchedulerBase::CandReason Reason) {
2490   if (TryVal > CandVal) {
2491     TryCand.Reason = Reason;
2492     return true;
2493   }
2494   if (TryVal < CandVal) {
2495     if (Cand.Reason > Reason)
2496       Cand.Reason = Reason;
2497     return true;
2498   }
2499   Cand.setRepeat(Reason);
2500   return false;
2501 }
2502 
2503 static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2504                        GenericSchedulerBase::SchedCandidate &Cand,
2505                        SchedBoundary &Zone) {
2506   if (Zone.isTop()) {
2507     if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2508       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2509                   TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2510         return true;
2511     }
2512     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2513                    TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2514       return true;
2515   }
2516   else {
2517     if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2518       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2519                   TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2520         return true;
2521     }
2522     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2523                    TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2524       return true;
2525   }
2526   return false;
2527 }
2528 
2529 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
2530                       bool IsTop) {
2531   DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2532         << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
2533 }
2534 
2535 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2536   assert(dag->hasVRegLiveness() &&
2537          "(PreRA)GenericScheduler needs vreg liveness");
2538   DAG = static_cast<ScheduleDAGMILive*>(dag);
2539   SchedModel = DAG->getSchedModel();
2540   TRI = DAG->TRI;
2541 
2542   Rem.init(DAG, SchedModel);
2543   Top.init(DAG, SchedModel, &Rem);
2544   Bot.init(DAG, SchedModel, &Rem);
2545 
2546   // Initialize resource counts.
2547 
2548   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2549   // are disabled, then these HazardRecs will be disabled.
2550   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2551   if (!Top.HazardRec) {
2552     Top.HazardRec =
2553         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2554             Itin, DAG);
2555   }
2556   if (!Bot.HazardRec) {
2557     Bot.HazardRec =
2558         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2559             Itin, DAG);
2560   }
2561 }
2562 
2563 /// Initialize the per-region scheduling policy.
2564 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2565                                   MachineBasicBlock::iterator End,
2566                                   unsigned NumRegionInstrs) {
2567   const MachineFunction &MF = *Begin->getParent()->getParent();
2568   const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2569 
2570   // Avoid setting up the register pressure tracker for small regions to save
2571   // compile time. As a rough heuristic, only track pressure when the number of
2572   // schedulable instructions exceeds half the integer register file.
2573   RegionPolicy.ShouldTrackPressure = true;
2574   for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2575     MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2576     if (TLI->isTypeLegal(LegalIntVT)) {
2577       unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2578         TLI->getRegClassFor(LegalIntVT));
2579       RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2580     }
2581   }
2582 
2583   // For generic targets, we default to bottom-up, because it's simpler and more
2584   // compile-time optimizations have been implemented in that direction.
2585   RegionPolicy.OnlyBottomUp = true;
2586 
2587   // Allow the subtarget to override default policy.
2588   MF.getSubtarget().overrideSchedPolicy(RegionPolicy, Begin, End,
2589                                         NumRegionInstrs);
2590 
2591   // After subtarget overrides, apply command line options.
2592   if (!EnableRegPressure)
2593     RegionPolicy.ShouldTrackPressure = false;
2594 
2595   // Check -misched-topdown/bottomup can force or unforce scheduling direction.
2596   // e.g. -misched-bottomup=false allows scheduling in both directions.
2597   assert((!ForceTopDown || !ForceBottomUp) &&
2598          "-misched-topdown incompatible with -misched-bottomup");
2599   if (ForceBottomUp.getNumOccurrences() > 0) {
2600     RegionPolicy.OnlyBottomUp = ForceBottomUp;
2601     if (RegionPolicy.OnlyBottomUp)
2602       RegionPolicy.OnlyTopDown = false;
2603   }
2604   if (ForceTopDown.getNumOccurrences() > 0) {
2605     RegionPolicy.OnlyTopDown = ForceTopDown;
2606     if (RegionPolicy.OnlyTopDown)
2607       RegionPolicy.OnlyBottomUp = false;
2608   }
2609 }
2610 
2611 void GenericScheduler::dumpPolicy() {
2612   dbgs() << "GenericScheduler RegionPolicy: "
2613          << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2614          << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2615          << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2616          << "\n";
2617 }
2618 
2619 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2620 /// critical path by more cycles than it takes to drain the instruction buffer.
2621 /// We estimate an upper bounds on in-flight instructions as:
2622 ///
2623 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2624 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2625 /// InFlightResources = InFlightIterations * LoopResources
2626 ///
2627 /// TODO: Check execution resources in addition to IssueCount.
2628 void GenericScheduler::checkAcyclicLatency() {
2629   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2630     return;
2631 
2632   // Scaled number of cycles per loop iteration.
2633   unsigned IterCount =
2634     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2635              Rem.RemIssueCount);
2636   // Scaled acyclic critical path.
2637   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2638   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2639   unsigned InFlightCount =
2640     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2641   unsigned BufferLimit =
2642     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2643 
2644   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2645 
2646   DEBUG(dbgs() << "IssueCycles="
2647         << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2648         << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2649         << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2650         << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2651         << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2652         if (Rem.IsAcyclicLatencyLimited)
2653           dbgs() << "  ACYCLIC LATENCY LIMIT\n");
2654 }
2655 
2656 void GenericScheduler::registerRoots() {
2657   Rem.CriticalPath = DAG->ExitSU.getDepth();
2658 
2659   // Some roots may not feed into ExitSU. Check all of them in case.
2660   for (std::vector<SUnit*>::const_iterator
2661          I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2662     if ((*I)->getDepth() > Rem.CriticalPath)
2663       Rem.CriticalPath = (*I)->getDepth();
2664   }
2665   DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2666   if (DumpCriticalPathLength) {
2667     errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2668   }
2669 
2670   if (EnableCyclicPath) {
2671     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2672     checkAcyclicLatency();
2673   }
2674 }
2675 
2676 static bool tryPressure(const PressureChange &TryP,
2677                         const PressureChange &CandP,
2678                         GenericSchedulerBase::SchedCandidate &TryCand,
2679                         GenericSchedulerBase::SchedCandidate &Cand,
2680                         GenericSchedulerBase::CandReason Reason,
2681                         const TargetRegisterInfo *TRI,
2682                         const MachineFunction &MF) {
2683   unsigned TryPSet = TryP.getPSetOrMax();
2684   unsigned CandPSet = CandP.getPSetOrMax();
2685   // If both candidates affect the same set, go with the smallest increase.
2686   if (TryPSet == CandPSet) {
2687     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2688                    Reason);
2689   }
2690   // If one candidate decreases and the other increases, go with it.
2691   // Invalid candidates have UnitInc==0.
2692   if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2693                  Reason)) {
2694     return true;
2695   }
2696 
2697   int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2698                                  std::numeric_limits<int>::max();
2699 
2700   int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2701                                    std::numeric_limits<int>::max();
2702 
2703   // If the candidates are decreasing pressure, reverse priority.
2704   if (TryP.getUnitInc() < 0)
2705     std::swap(TryRank, CandRank);
2706   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2707 }
2708 
2709 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2710   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2711 }
2712 
2713 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2714 /// their physreg def/use.
2715 ///
2716 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2717 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2718 /// with the operation that produces or consumes the physreg. We'll do this when
2719 /// regalloc has support for parallel copies.
2720 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2721   const MachineInstr *MI = SU->getInstr();
2722   if (!MI->isCopy())
2723     return 0;
2724 
2725   unsigned ScheduledOper = isTop ? 1 : 0;
2726   unsigned UnscheduledOper = isTop ? 0 : 1;
2727   // If we have already scheduled the physreg produce/consumer, immediately
2728   // schedule the copy.
2729   if (TargetRegisterInfo::isPhysicalRegister(
2730         MI->getOperand(ScheduledOper).getReg()))
2731     return 1;
2732   // If the physreg is at the boundary, defer it. Otherwise schedule it
2733   // immediately to free the dependent. We can hoist the copy later.
2734   bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2735   if (TargetRegisterInfo::isPhysicalRegister(
2736         MI->getOperand(UnscheduledOper).getReg()))
2737     return AtBoundary ? -1 : 1;
2738   return 0;
2739 }
2740 
2741 /// Apply a set of heursitics to a new candidate. Heuristics are currently
2742 /// hierarchical. This may be more efficient than a graduated cost model because
2743 /// we don't need to evaluate all aspects of the model for each node in the
2744 /// queue. But it's really done to make the heuristics easier to debug and
2745 /// statistically analyze.
2746 ///
2747 /// \param Cand provides the policy and current best candidate.
2748 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2749 /// \param Zone describes the scheduled zone that we are extending.
2750 /// \param RPTracker describes reg pressure within the scheduled zone.
2751 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
2752 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2753                                     SchedCandidate &TryCand,
2754                                     SchedBoundary &Zone,
2755                                     const RegPressureTracker &RPTracker,
2756                                     RegPressureTracker &TempTracker) {
2757 
2758   if (DAG->isTrackingPressure()) {
2759     // Always initialize TryCand's RPDelta.
2760     if (Zone.isTop()) {
2761       TempTracker.getMaxDownwardPressureDelta(
2762         TryCand.SU->getInstr(),
2763         TryCand.RPDelta,
2764         DAG->getRegionCriticalPSets(),
2765         DAG->getRegPressure().MaxSetPressure);
2766     }
2767     else {
2768       if (VerifyScheduling) {
2769         TempTracker.getMaxUpwardPressureDelta(
2770           TryCand.SU->getInstr(),
2771           &DAG->getPressureDiff(TryCand.SU),
2772           TryCand.RPDelta,
2773           DAG->getRegionCriticalPSets(),
2774           DAG->getRegPressure().MaxSetPressure);
2775       }
2776       else {
2777         RPTracker.getUpwardPressureDelta(
2778           TryCand.SU->getInstr(),
2779           DAG->getPressureDiff(TryCand.SU),
2780           TryCand.RPDelta,
2781           DAG->getRegionCriticalPSets(),
2782           DAG->getRegPressure().MaxSetPressure);
2783       }
2784     }
2785   }
2786   DEBUG(if (TryCand.RPDelta.Excess.isValid())
2787           dbgs() << "  Try  SU(" << TryCand.SU->NodeNum << ") "
2788                  << TRI->getRegPressureSetName(TryCand.RPDelta.Excess.getPSet())
2789                  << ":" << TryCand.RPDelta.Excess.getUnitInc() << "\n");
2790 
2791   // Initialize the candidate if needed.
2792   if (!Cand.isValid()) {
2793     TryCand.Reason = NodeOrder;
2794     return;
2795   }
2796 
2797   if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
2798                  biasPhysRegCopy(Cand.SU, Zone.isTop()),
2799                  TryCand, Cand, PhysRegCopy))
2800     return;
2801 
2802   // Avoid exceeding the target's limit.
2803   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2804                                                Cand.RPDelta.Excess,
2805                                                TryCand, Cand, RegExcess, TRI,
2806                                                DAG->MF))
2807     return;
2808 
2809   // Avoid increasing the max critical pressure in the scheduled region.
2810   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
2811                                                Cand.RPDelta.CriticalMax,
2812                                                TryCand, Cand, RegCritical, TRI,
2813                                                DAG->MF))
2814     return;
2815 
2816   // For loops that are acyclic path limited, aggressively schedule for latency.
2817   // This can result in very long dependence chains scheduled in sequence, so
2818   // once every cycle (when CurrMOps == 0), switch to normal heuristics.
2819   if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
2820       && tryLatency(TryCand, Cand, Zone))
2821     return;
2822 
2823   // Prioritize instructions that read unbuffered resources by stall cycles.
2824   if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
2825               Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
2826     return;
2827 
2828   // Keep clustered nodes together to encourage downstream peephole
2829   // optimizations which may reduce resource requirements.
2830   //
2831   // This is a best effort to set things up for a post-RA pass. Optimizations
2832   // like generating loads of multiple registers should ideally be done within
2833   // the scheduler pass by combining the loads during DAG postprocessing.
2834   const SUnit *NextClusterSU =
2835     Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2836   if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
2837                  TryCand, Cand, Cluster))
2838     return;
2839 
2840   // Weak edges are for clustering and other constraints.
2841   if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
2842               getWeakLeft(Cand.SU, Zone.isTop()),
2843               TryCand, Cand, Weak)) {
2844     return;
2845   }
2846   // Avoid increasing the max pressure of the entire region.
2847   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
2848                                                Cand.RPDelta.CurrentMax,
2849                                                TryCand, Cand, RegMax, TRI,
2850                                                DAG->MF))
2851     return;
2852 
2853   // Avoid critical resource consumption and balance the schedule.
2854   TryCand.initResourceDelta(DAG, SchedModel);
2855   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2856               TryCand, Cand, ResourceReduce))
2857     return;
2858   if (tryGreater(TryCand.ResDelta.DemandedResources,
2859                  Cand.ResDelta.DemandedResources,
2860                  TryCand, Cand, ResourceDemand))
2861     return;
2862 
2863   // Avoid serializing long latency dependence chains.
2864   // For acyclic path limited loops, latency was already checked above.
2865   if (!RegionPolicy.DisableLatencyHeuristic && Cand.Policy.ReduceLatency &&
2866       !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, Zone)) {
2867     return;
2868   }
2869 
2870   // Prefer immediate defs/users of the last scheduled instruction. This is a
2871   // local pressure avoidance strategy that also makes the machine code
2872   // readable.
2873   if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
2874                  TryCand, Cand, NextDefUse))
2875     return;
2876 
2877   // Fall through to original instruction order.
2878   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2879       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2880     TryCand.Reason = NodeOrder;
2881   }
2882 }
2883 
2884 /// Pick the best candidate from the queue.
2885 ///
2886 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2887 /// DAG building. To adjust for the current scheduling location we need to
2888 /// maintain the number of vreg uses remaining to be top-scheduled.
2889 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2890                                          const RegPressureTracker &RPTracker,
2891                                          SchedCandidate &Cand) {
2892   ReadyQueue &Q = Zone.Available;
2893 
2894   DEBUG(Q.dump());
2895 
2896   // getMaxPressureDelta temporarily modifies the tracker.
2897   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2898 
2899   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2900 
2901     SchedCandidate TryCand(Cand.Policy);
2902     TryCand.SU = *I;
2903     tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
2904     if (TryCand.Reason != NoCand) {
2905       // Initialize resource delta if needed in case future heuristics query it.
2906       if (TryCand.ResDelta == SchedResourceDelta())
2907         TryCand.initResourceDelta(DAG, SchedModel);
2908       Cand.setBest(TryCand);
2909       DEBUG(traceCandidate(Cand));
2910     }
2911   }
2912 }
2913 
2914 /// Pick the best candidate node from either the top or bottom queue.
2915 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
2916   // Schedule as far as possible in the direction of no choice. This is most
2917   // efficient, but also provides the best heuristics for CriticalPSets.
2918   if (SUnit *SU = Bot.pickOnlyChoice()) {
2919     IsTopNode = false;
2920     DEBUG(dbgs() << "Pick Bot ONLY1\n");
2921     return SU;
2922   }
2923   if (SUnit *SU = Top.pickOnlyChoice()) {
2924     IsTopNode = true;
2925     DEBUG(dbgs() << "Pick Top ONLY1\n");
2926     return SU;
2927   }
2928   CandPolicy NoPolicy;
2929   SchedCandidate BotCand(NoPolicy);
2930   SchedCandidate TopCand(NoPolicy);
2931   // Set the bottom-up policy based on the state of the current bottom zone and
2932   // the instructions outside the zone, including the top zone.
2933   setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
2934   // Set the top-down policy based on the state of the current top zone and
2935   // the instructions outside the zone, including the bottom zone.
2936   setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);
2937 
2938   // Prefer bottom scheduling when heuristics are silent.
2939   pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2940   assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2941 
2942   // If either Q has a single candidate that provides the least increase in
2943   // Excess pressure, we can immediately schedule from that Q.
2944   //
2945   // RegionCriticalPSets summarizes the pressure within the scheduled region and
2946   // affects picking from either Q. If scheduling in one direction must
2947   // increase pressure for one of the excess PSets, then schedule in that
2948   // direction first to provide more freedom in the other direction.
2949   if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
2950       || (BotCand.Reason == RegCritical
2951           && !BotCand.isRepeat(RegCritical)))
2952   {
2953     IsTopNode = false;
2954     tracePick(BotCand, IsTopNode);
2955     return BotCand.SU;
2956   }
2957   // Check if the top Q has a better candidate.
2958   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2959   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2960 
2961   // Choose the queue with the most important (lowest enum) reason.
2962   if (TopCand.Reason < BotCand.Reason) {
2963     IsTopNode = true;
2964     tracePick(TopCand, IsTopNode);
2965     return TopCand.SU;
2966   }
2967   // Otherwise prefer the bottom candidate, in node order if all else failed.
2968   IsTopNode = false;
2969   tracePick(BotCand, IsTopNode);
2970   return BotCand.SU;
2971 }
2972 
2973 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2974 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
2975   if (DAG->top() == DAG->bottom()) {
2976     assert(Top.Available.empty() && Top.Pending.empty() &&
2977            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2978     return nullptr;
2979   }
2980   SUnit *SU;
2981   do {
2982     if (RegionPolicy.OnlyTopDown) {
2983       SU = Top.pickOnlyChoice();
2984       if (!SU) {
2985         CandPolicy NoPolicy;
2986         SchedCandidate TopCand(NoPolicy);
2987         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2988         assert(TopCand.Reason != NoCand && "failed to find a candidate");
2989         tracePick(TopCand, true);
2990         SU = TopCand.SU;
2991       }
2992       IsTopNode = true;
2993     }
2994     else if (RegionPolicy.OnlyBottomUp) {
2995       SU = Bot.pickOnlyChoice();
2996       if (!SU) {
2997         CandPolicy NoPolicy;
2998         SchedCandidate BotCand(NoPolicy);
2999         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
3000         assert(BotCand.Reason != NoCand && "failed to find a candidate");
3001         tracePick(BotCand, false);
3002         SU = BotCand.SU;
3003       }
3004       IsTopNode = false;
3005     }
3006     else {
3007       SU = pickNodeBidirectional(IsTopNode);
3008     }
3009   } while (SU->isScheduled);
3010 
3011   if (SU->isTopReady())
3012     Top.removeReady(SU);
3013   if (SU->isBottomReady())
3014     Bot.removeReady(SU);
3015 
3016   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3017   return SU;
3018 }
3019 
3020 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
3021 
3022   MachineBasicBlock::iterator InsertPos = SU->getInstr();
3023   if (!isTop)
3024     ++InsertPos;
3025   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3026 
3027   // Find already scheduled copies with a single physreg dependence and move
3028   // them just above the scheduled instruction.
3029   for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
3030        I != E; ++I) {
3031     if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
3032       continue;
3033     SUnit *DepSU = I->getSUnit();
3034     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3035       continue;
3036     MachineInstr *Copy = DepSU->getInstr();
3037     if (!Copy->isCopy())
3038       continue;
3039     DEBUG(dbgs() << "  Rescheduling physreg copy ";
3040           I->getSUnit()->dump(DAG));
3041     DAG->moveInstruction(Copy, InsertPos);
3042   }
3043 }
3044 
3045 /// Update the scheduler's state after scheduling a node. This is the same node
3046 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3047 /// update it's state based on the current cycle before MachineSchedStrategy
3048 /// does.
3049 ///
3050 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3051 /// them here. See comments in biasPhysRegCopy.
3052 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3053   if (IsTopNode) {
3054     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3055     Top.bumpNode(SU);
3056     if (SU->hasPhysRegUses)
3057       reschedulePhysRegCopies(SU, true);
3058   }
3059   else {
3060     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3061     Bot.bumpNode(SU);
3062     if (SU->hasPhysRegDefs)
3063       reschedulePhysRegCopies(SU, false);
3064   }
3065 }
3066 
3067 /// Create the standard converging machine scheduler. This will be used as the
3068 /// default scheduler if the target does not set a default.
3069 static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
3070   ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
3071   // Register DAG post-processors.
3072   //
3073   // FIXME: extend the mutation API to allow earlier mutations to instantiate
3074   // data and pass it to later mutations. Have a single mutation that gathers
3075   // the interesting nodes in one pass.
3076   DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
3077   if (EnableMemOpCluster) {
3078     if (DAG->TII->enableClusterLoads())
3079       DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
3080     if (DAG->TII->enableClusterStores())
3081       DAG->addMutation(make_unique<StoreClusterMutation>(DAG->TII, DAG->TRI));
3082   }
3083   if (EnableMacroFusion)
3084     DAG->addMutation(make_unique<MacroFusion>(*DAG->TII, *DAG->TRI));
3085   return DAG;
3086 }
3087 
3088 static MachineSchedRegistry
3089 GenericSchedRegistry("converge", "Standard converging scheduler.",
3090                      createGenericSchedLive);
3091 
3092 //===----------------------------------------------------------------------===//
3093 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3094 //===----------------------------------------------------------------------===//
3095 
3096 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3097   DAG = Dag;
3098   SchedModel = DAG->getSchedModel();
3099   TRI = DAG->TRI;
3100 
3101   Rem.init(DAG, SchedModel);
3102   Top.init(DAG, SchedModel, &Rem);
3103   BotRoots.clear();
3104 
3105   // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3106   // or are disabled, then these HazardRecs will be disabled.
3107   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3108   if (!Top.HazardRec) {
3109     Top.HazardRec =
3110         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3111             Itin, DAG);
3112   }
3113 }
3114 
3115 
3116 void PostGenericScheduler::registerRoots() {
3117   Rem.CriticalPath = DAG->ExitSU.getDepth();
3118 
3119   // Some roots may not feed into ExitSU. Check all of them in case.
3120   for (SmallVectorImpl<SUnit*>::const_iterator
3121          I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
3122     if ((*I)->getDepth() > Rem.CriticalPath)
3123       Rem.CriticalPath = (*I)->getDepth();
3124   }
3125   DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
3126   if (DumpCriticalPathLength) {
3127     errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
3128   }
3129 }
3130 
3131 /// Apply a set of heursitics to a new candidate for PostRA scheduling.
3132 ///
3133 /// \param Cand provides the policy and current best candidate.
3134 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3135 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3136                                         SchedCandidate &TryCand) {
3137 
3138   // Initialize the candidate if needed.
3139   if (!Cand.isValid()) {
3140     TryCand.Reason = NodeOrder;
3141     return;
3142   }
3143 
3144   // Prioritize instructions that read unbuffered resources by stall cycles.
3145   if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3146               Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3147     return;
3148 
3149   // Avoid critical resource consumption and balance the schedule.
3150   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3151               TryCand, Cand, ResourceReduce))
3152     return;
3153   if (tryGreater(TryCand.ResDelta.DemandedResources,
3154                  Cand.ResDelta.DemandedResources,
3155                  TryCand, Cand, ResourceDemand))
3156     return;
3157 
3158   // Avoid serializing long latency dependence chains.
3159   if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3160     return;
3161   }
3162 
3163   // Fall through to original instruction order.
3164   if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3165     TryCand.Reason = NodeOrder;
3166 }
3167 
3168 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3169   ReadyQueue &Q = Top.Available;
3170 
3171   DEBUG(Q.dump());
3172 
3173   for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
3174     SchedCandidate TryCand(Cand.Policy);
3175     TryCand.SU = *I;
3176     TryCand.initResourceDelta(DAG, SchedModel);
3177     tryCandidate(Cand, TryCand);
3178     if (TryCand.Reason != NoCand) {
3179       Cand.setBest(TryCand);
3180       DEBUG(traceCandidate(Cand));
3181     }
3182   }
3183 }
3184 
3185 /// Pick the next node to schedule.
3186 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3187   if (DAG->top() == DAG->bottom()) {
3188     assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3189     return nullptr;
3190   }
3191   SUnit *SU;
3192   do {
3193     SU = Top.pickOnlyChoice();
3194     if (!SU) {
3195       CandPolicy NoPolicy;
3196       SchedCandidate TopCand(NoPolicy);
3197       // Set the top-down policy based on the state of the current top zone and
3198       // the instructions outside the zone, including the bottom zone.
3199       setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3200       pickNodeFromQueue(TopCand);
3201       assert(TopCand.Reason != NoCand && "failed to find a candidate");
3202       tracePick(TopCand, true);
3203       SU = TopCand.SU;
3204     }
3205   } while (SU->isScheduled);
3206 
3207   IsTopNode = true;
3208   Top.removeReady(SU);
3209 
3210   DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3211   return SU;
3212 }
3213 
3214 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3215 /// scheduled/remaining flags in the DAG nodes.
3216 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3217   SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3218   Top.bumpNode(SU);
3219 }
3220 
3221 /// Create a generic scheduler with no vreg liveness or DAG mutation passes.
3222 static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
3223   return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C), /*IsPostRA=*/true);
3224 }
3225 
3226 //===----------------------------------------------------------------------===//
3227 // ILP Scheduler. Currently for experimental analysis of heuristics.
3228 //===----------------------------------------------------------------------===//
3229 
3230 namespace {
3231 /// \brief Order nodes by the ILP metric.
3232 struct ILPOrder {
3233   const SchedDFSResult *DFSResult;
3234   const BitVector *ScheduledTrees;
3235   bool MaximizeILP;
3236 
3237   ILPOrder(bool MaxILP)
3238     : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}
3239 
3240   /// \brief Apply a less-than relation on node priority.
3241   ///
3242   /// (Return true if A comes after B in the Q.)
3243   bool operator()(const SUnit *A, const SUnit *B) const {
3244     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3245     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3246     if (SchedTreeA != SchedTreeB) {
3247       // Unscheduled trees have lower priority.
3248       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3249         return ScheduledTrees->test(SchedTreeB);
3250 
3251       // Trees with shallower connections have have lower priority.
3252       if (DFSResult->getSubtreeLevel(SchedTreeA)
3253           != DFSResult->getSubtreeLevel(SchedTreeB)) {
3254         return DFSResult->getSubtreeLevel(SchedTreeA)
3255           < DFSResult->getSubtreeLevel(SchedTreeB);
3256       }
3257     }
3258     if (MaximizeILP)
3259       return DFSResult->getILP(A) < DFSResult->getILP(B);
3260     else
3261       return DFSResult->getILP(A) > DFSResult->getILP(B);
3262   }
3263 };
3264 
3265 /// \brief Schedule based on the ILP metric.
3266 class ILPScheduler : public MachineSchedStrategy {
3267   ScheduleDAGMILive *DAG;
3268   ILPOrder Cmp;
3269 
3270   std::vector<SUnit*> ReadyQ;
3271 public:
3272   ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}
3273 
3274   void initialize(ScheduleDAGMI *dag) override {
3275     assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3276     DAG = static_cast<ScheduleDAGMILive*>(dag);
3277     DAG->computeDFSResult();
3278     Cmp.DFSResult = DAG->getDFSResult();
3279     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3280     ReadyQ.clear();
3281   }
3282 
3283   void registerRoots() override {
3284     // Restore the heap in ReadyQ with the updated DFS results.
3285     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3286   }
3287 
3288   /// Implement MachineSchedStrategy interface.
3289   /// -----------------------------------------
3290 
3291   /// Callback to select the highest priority node from the ready Q.
3292   SUnit *pickNode(bool &IsTopNode) override {
3293     if (ReadyQ.empty()) return nullptr;
3294     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3295     SUnit *SU = ReadyQ.back();
3296     ReadyQ.pop_back();
3297     IsTopNode = false;
3298     DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
3299           << " ILP: " << DAG->getDFSResult()->getILP(SU)
3300           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
3301           << DAG->getDFSResult()->getSubtreeLevel(
3302             DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
3303           << "Scheduling " << *SU->getInstr());
3304     return SU;
3305   }
3306 
3307   /// \brief Scheduler callback to notify that a new subtree is scheduled.
3308   void scheduleTree(unsigned SubtreeID) override {
3309     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3310   }
3311 
3312   /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
3313   /// DFSResults, and resort the priority Q.
3314   void schedNode(SUnit *SU, bool IsTopNode) override {
3315     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
3316   }
3317 
3318   void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
3319 
3320   void releaseBottomNode(SUnit *SU) override {
3321     ReadyQ.push_back(SU);
3322     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3323   }
3324 };
3325 } // namespace
3326 
3327 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
3328   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
3329 }
3330 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
3331   return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
3332 }
3333 static MachineSchedRegistry ILPMaxRegistry(
3334   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
3335 static MachineSchedRegistry ILPMinRegistry(
3336   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
3337 
3338 //===----------------------------------------------------------------------===//
3339 // Machine Instruction Shuffler for Correctness Testing
3340 //===----------------------------------------------------------------------===//
3341 
3342 #ifndef NDEBUG
3343 namespace {
3344 /// Apply a less-than relation on the node order, which corresponds to the
3345 /// instruction order prior to scheduling. IsReverse implements greater-than.
3346 template<bool IsReverse>
3347 struct SUnitOrder {
3348   bool operator()(SUnit *A, SUnit *B) const {
3349     if (IsReverse)
3350       return A->NodeNum > B->NodeNum;
3351     else
3352       return A->NodeNum < B->NodeNum;
3353   }
3354 };
3355 
3356 /// Reorder instructions as much as possible.
3357 class InstructionShuffler : public MachineSchedStrategy {
3358   bool IsAlternating;
3359   bool IsTopDown;
3360 
3361   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
3362   // gives nodes with a higher number higher priority causing the latest
3363   // instructions to be scheduled first.
3364   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
3365     TopQ;
3366   // When scheduling bottom-up, use greater-than as the queue priority.
3367   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
3368     BottomQ;
3369 public:
3370   InstructionShuffler(bool alternate, bool topdown)
3371     : IsAlternating(alternate), IsTopDown(topdown) {}
3372 
3373   void initialize(ScheduleDAGMI*) override {
3374     TopQ.clear();
3375     BottomQ.clear();
3376   }
3377 
3378   /// Implement MachineSchedStrategy interface.
3379   /// -----------------------------------------
3380 
3381   SUnit *pickNode(bool &IsTopNode) override {
3382     SUnit *SU;
3383     if (IsTopDown) {
3384       do {
3385         if (TopQ.empty()) return nullptr;
3386         SU = TopQ.top();
3387         TopQ.pop();
3388       } while (SU->isScheduled);
3389       IsTopNode = true;
3390     }
3391     else {
3392       do {
3393         if (BottomQ.empty()) return nullptr;
3394         SU = BottomQ.top();
3395         BottomQ.pop();
3396       } while (SU->isScheduled);
3397       IsTopNode = false;
3398     }
3399     if (IsAlternating)
3400       IsTopDown = !IsTopDown;
3401     return SU;
3402   }
3403 
3404   void schedNode(SUnit *SU, bool IsTopNode) override {}
3405 
3406   void releaseTopNode(SUnit *SU) override {
3407     TopQ.push(SU);
3408   }
3409   void releaseBottomNode(SUnit *SU) override {
3410     BottomQ.push(SU);
3411   }
3412 };
3413 } // namespace
3414 
3415 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
3416   bool Alternate = !ForceTopDown && !ForceBottomUp;
3417   bool TopDown = !ForceBottomUp;
3418   assert((TopDown || !ForceTopDown) &&
3419          "-misched-topdown incompatible with -misched-bottomup");
3420   return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate, TopDown));
3421 }
3422 static MachineSchedRegistry ShufflerRegistry(
3423   "shuffle", "Shuffle machine instructions alternating directions",
3424   createInstructionShuffler);
3425 #endif // !NDEBUG
3426 
3427 //===----------------------------------------------------------------------===//
3428 // GraphWriter support for ScheduleDAGMILive.
3429 //===----------------------------------------------------------------------===//
3430 
3431 #ifndef NDEBUG
3432 namespace llvm {
3433 
3434 template<> struct GraphTraits<
3435   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
3436 
3437 template<>
3438 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
3439 
3440   DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
3441 
3442   static std::string getGraphName(const ScheduleDAG *G) {
3443     return G->MF.getName();
3444   }
3445 
3446   static bool renderGraphFromBottomUp() {
3447     return true;
3448   }
3449 
3450   static bool isNodeHidden(const SUnit *Node) {
3451     if (ViewMISchedCutoff == 0)
3452       return false;
3453     return (Node->Preds.size() > ViewMISchedCutoff
3454          || Node->Succs.size() > ViewMISchedCutoff);
3455   }
3456 
3457   /// If you want to override the dot attributes printed for a particular
3458   /// edge, override this method.
3459   static std::string getEdgeAttributes(const SUnit *Node,
3460                                        SUnitIterator EI,
3461                                        const ScheduleDAG *Graph) {
3462     if (EI.isArtificialDep())
3463       return "color=cyan,style=dashed";
3464     if (EI.isCtrlDep())
3465       return "color=blue,style=dashed";
3466     return "";
3467   }
3468 
3469   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
3470     std::string Str;
3471     raw_string_ostream SS(Str);
3472     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3473     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3474       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3475     SS << "SU:" << SU->NodeNum;
3476     if (DFS)
3477       SS << " I:" << DFS->getNumInstrs(SU);
3478     return SS.str();
3479   }
3480   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
3481     return G->getGraphNodeLabel(SU);
3482   }
3483 
3484   static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
3485     std::string Str("shape=Mrecord");
3486     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
3487     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
3488       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
3489     if (DFS) {
3490       Str += ",style=filled,fillcolor=\"#";
3491       Str += DOT::getColorString(DFS->getSubtreeID(N));
3492       Str += '"';
3493     }
3494     return Str;
3495   }
3496 };
3497 } // namespace llvm
3498 #endif // NDEBUG
3499 
3500 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
3501 /// rendered using 'dot'.
3502 ///
3503 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
3504 #ifndef NDEBUG
3505   ViewGraph(this, Name, false, Title);
3506 #else
3507   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
3508          << "systems with Graphviz or gv!\n";
3509 #endif  // NDEBUG
3510 }
3511 
3512 /// Out-of-line implementation with no arguments is handy for gdb.
3513 void ScheduleDAGMI::viewGraph() {
3514   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
3515 }
3516