1 //===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // MachineScheduler schedules machine instructions after phi elimination. It 11 // preserves LiveIntervals so it can be invoked before register allocation. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #define DEBUG_TYPE "misched" 16 17 #include "llvm/CodeGen/LiveIntervalAnalysis.h" 18 #include "llvm/CodeGen/MachineScheduler.h" 19 #include "llvm/CodeGen/Passes.h" 20 #include "llvm/CodeGen/RegisterClassInfo.h" 21 #include "llvm/CodeGen/RegisterPressure.h" 22 #include "llvm/CodeGen/ScheduleDAGInstrs.h" 23 #include "llvm/CodeGen/ScheduleHazardRecognizer.h" 24 #include "llvm/Target/TargetInstrInfo.h" 25 #include "llvm/MC/MCInstrItineraries.h" 26 #include "llvm/Analysis/AliasAnalysis.h" 27 #include "llvm/Support/CommandLine.h" 28 #include "llvm/Support/Debug.h" 29 #include "llvm/Support/ErrorHandling.h" 30 #include "llvm/Support/raw_ostream.h" 31 #include "llvm/ADT/OwningPtr.h" 32 #include "llvm/ADT/PriorityQueue.h" 33 34 #include <queue> 35 36 using namespace llvm; 37 38 static cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden, 39 cl::desc("Force top-down list scheduling")); 40 static cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden, 41 cl::desc("Force bottom-up list scheduling")); 42 43 #ifndef NDEBUG 44 static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden, 45 cl::desc("Pop up a window to show MISched dags after they are processed")); 46 47 static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden, 48 cl::desc("Stop scheduling after N instructions"), cl::init(~0U)); 49 #else 50 static bool ViewMISchedDAGs = false; 51 #endif // NDEBUG 52 53 //===----------------------------------------------------------------------===// 54 // Machine Instruction Scheduling Pass and Registry 55 //===----------------------------------------------------------------------===// 56 57 MachineSchedContext::MachineSchedContext(): 58 MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) { 59 RegClassInfo = new RegisterClassInfo(); 60 } 61 62 MachineSchedContext::~MachineSchedContext() { 63 delete RegClassInfo; 64 } 65 66 namespace { 67 /// MachineScheduler runs after coalescing and before register allocation. 68 class MachineScheduler : public MachineSchedContext, 69 public MachineFunctionPass { 70 public: 71 MachineScheduler(); 72 73 virtual void getAnalysisUsage(AnalysisUsage &AU) const; 74 75 virtual void releaseMemory() {} 76 77 virtual bool runOnMachineFunction(MachineFunction&); 78 79 virtual void print(raw_ostream &O, const Module* = 0) const; 80 81 static char ID; // Class identification, replacement for typeinfo 82 }; 83 } // namespace 84 85 char MachineScheduler::ID = 0; 86 87 char &llvm::MachineSchedulerID = MachineScheduler::ID; 88 89 INITIALIZE_PASS_BEGIN(MachineScheduler, "misched", 90 "Machine Instruction Scheduler", false, false) 91 INITIALIZE_AG_DEPENDENCY(AliasAnalysis) 92 INITIALIZE_PASS_DEPENDENCY(SlotIndexes) 93 INITIALIZE_PASS_DEPENDENCY(LiveIntervals) 94 INITIALIZE_PASS_END(MachineScheduler, "misched", 95 "Machine Instruction Scheduler", false, false) 96 97 MachineScheduler::MachineScheduler() 98 : MachineFunctionPass(ID) { 99 initializeMachineSchedulerPass(*PassRegistry::getPassRegistry()); 100 } 101 102 void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const { 103 AU.setPreservesCFG(); 104 AU.addRequiredID(MachineDominatorsID); 105 AU.addRequired<MachineLoopInfo>(); 106 AU.addRequired<AliasAnalysis>(); 107 AU.addRequired<TargetPassConfig>(); 108 AU.addRequired<SlotIndexes>(); 109 AU.addPreserved<SlotIndexes>(); 110 AU.addRequired<LiveIntervals>(); 111 AU.addPreserved<LiveIntervals>(); 112 MachineFunctionPass::getAnalysisUsage(AU); 113 } 114 115 MachinePassRegistry MachineSchedRegistry::Registry; 116 117 /// A dummy default scheduler factory indicates whether the scheduler 118 /// is overridden on the command line. 119 static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) { 120 return 0; 121 } 122 123 /// MachineSchedOpt allows command line selection of the scheduler. 124 static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false, 125 RegisterPassParser<MachineSchedRegistry> > 126 MachineSchedOpt("misched", 127 cl::init(&useDefaultMachineSched), cl::Hidden, 128 cl::desc("Machine instruction scheduler to use")); 129 130 static MachineSchedRegistry 131 DefaultSchedRegistry("default", "Use the target's default scheduler choice.", 132 useDefaultMachineSched); 133 134 /// Forward declare the standard machine scheduler. This will be used as the 135 /// default scheduler if the target does not set a default. 136 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C); 137 138 139 /// Decrement this iterator until reaching the top or a non-debug instr. 140 static MachineBasicBlock::iterator 141 priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) { 142 assert(I != Beg && "reached the top of the region, cannot decrement"); 143 while (--I != Beg) { 144 if (!I->isDebugValue()) 145 break; 146 } 147 return I; 148 } 149 150 /// If this iterator is a debug value, increment until reaching the End or a 151 /// non-debug instruction. 152 static MachineBasicBlock::iterator 153 nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) { 154 for(; I != End; ++I) { 155 if (!I->isDebugValue()) 156 break; 157 } 158 return I; 159 } 160 161 /// Top-level MachineScheduler pass driver. 162 /// 163 /// Visit blocks in function order. Divide each block into scheduling regions 164 /// and visit them bottom-up. Visiting regions bottom-up is not required, but is 165 /// consistent with the DAG builder, which traverses the interior of the 166 /// scheduling regions bottom-up. 167 /// 168 /// This design avoids exposing scheduling boundaries to the DAG builder, 169 /// simplifying the DAG builder's support for "special" target instructions. 170 /// At the same time the design allows target schedulers to operate across 171 /// scheduling boundaries, for example to bundle the boudary instructions 172 /// without reordering them. This creates complexity, because the target 173 /// scheduler must update the RegionBegin and RegionEnd positions cached by 174 /// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler 175 /// design would be to split blocks at scheduling boundaries, but LLVM has a 176 /// general bias against block splitting purely for implementation simplicity. 177 bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { 178 DEBUG(dbgs() << "Before MISsched:\n"; mf.print(dbgs())); 179 180 // Initialize the context of the pass. 181 MF = &mf; 182 MLI = &getAnalysis<MachineLoopInfo>(); 183 MDT = &getAnalysis<MachineDominatorTree>(); 184 PassConfig = &getAnalysis<TargetPassConfig>(); 185 AA = &getAnalysis<AliasAnalysis>(); 186 187 LIS = &getAnalysis<LiveIntervals>(); 188 const TargetInstrInfo *TII = MF->getTarget().getInstrInfo(); 189 190 RegClassInfo->runOnMachineFunction(*MF); 191 192 // Select the scheduler, or set the default. 193 MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt; 194 if (Ctor == useDefaultMachineSched) { 195 // Get the default scheduler set by the target. 196 Ctor = MachineSchedRegistry::getDefault(); 197 if (!Ctor) { 198 Ctor = createConvergingSched; 199 MachineSchedRegistry::setDefault(Ctor); 200 } 201 } 202 // Instantiate the selected scheduler. 203 OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this)); 204 205 // Visit all machine basic blocks. 206 // 207 // TODO: Visit blocks in global postorder or postorder within the bottom-up 208 // loop tree. Then we can optionally compute global RegPressure. 209 for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end(); 210 MBB != MBBEnd; ++MBB) { 211 212 Scheduler->startBlock(MBB); 213 214 // Break the block into scheduling regions [I, RegionEnd), and schedule each 215 // region as soon as it is discovered. RegionEnd points the the scheduling 216 // boundary at the bottom of the region. The DAG does not include RegionEnd, 217 // but the region does (i.e. the next RegionEnd is above the previous 218 // RegionBegin). If the current block has no terminator then RegionEnd == 219 // MBB->end() for the bottom region. 220 // 221 // The Scheduler may insert instructions during either schedule() or 222 // exitRegion(), even for empty regions. So the local iterators 'I' and 223 // 'RegionEnd' are invalid across these calls. 224 unsigned RemainingCount = MBB->size(); 225 for(MachineBasicBlock::iterator RegionEnd = MBB->end(); 226 RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) { 227 228 // Avoid decrementing RegionEnd for blocks with no terminator. 229 if (RegionEnd != MBB->end() 230 || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) { 231 --RegionEnd; 232 // Count the boundary instruction. 233 --RemainingCount; 234 } 235 236 // The next region starts above the previous region. Look backward in the 237 // instruction stream until we find the nearest boundary. 238 MachineBasicBlock::iterator I = RegionEnd; 239 for(;I != MBB->begin(); --I, --RemainingCount) { 240 if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF)) 241 break; 242 } 243 // Notify the scheduler of the region, even if we may skip scheduling 244 // it. Perhaps it still needs to be bundled. 245 Scheduler->enterRegion(MBB, I, RegionEnd, RemainingCount); 246 247 // Skip empty scheduling regions (0 or 1 schedulable instructions). 248 if (I == RegionEnd || I == llvm::prior(RegionEnd)) { 249 // Close the current region. Bundle the terminator if needed. 250 // This invalidates 'RegionEnd' and 'I'. 251 Scheduler->exitRegion(); 252 continue; 253 } 254 DEBUG(dbgs() << "********** MI Scheduling **********\n"); 255 DEBUG(dbgs() << MF->getFunction()->getName() 256 << ":BB#" << MBB->getNumber() << "\n From: " << *I << " To: "; 257 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd; 258 else dbgs() << "End"; 259 dbgs() << " Remaining: " << RemainingCount << "\n"); 260 261 // Schedule a region: possibly reorder instructions. 262 // This invalidates 'RegionEnd' and 'I'. 263 Scheduler->schedule(); 264 265 // Close the current region. 266 Scheduler->exitRegion(); 267 268 // Scheduling has invalidated the current iterator 'I'. Ask the 269 // scheduler for the top of it's scheduled region. 270 RegionEnd = Scheduler->begin(); 271 } 272 assert(RemainingCount == 0 && "Instruction count mismatch!"); 273 Scheduler->finishBlock(); 274 } 275 Scheduler->finalizeSchedule(); 276 DEBUG(LIS->print(dbgs())); 277 return true; 278 } 279 280 void MachineScheduler::print(raw_ostream &O, const Module* m) const { 281 // unimplemented 282 } 283 284 //===----------------------------------------------------------------------===// 285 // MachineSchedStrategy - Interface to a machine scheduling algorithm. 286 //===----------------------------------------------------------------------===// 287 288 namespace { 289 class ScheduleDAGMI; 290 291 /// MachineSchedStrategy - Interface used by ScheduleDAGMI to drive the selected 292 /// scheduling algorithm. 293 /// 294 /// If this works well and targets wish to reuse ScheduleDAGMI, we may expose it 295 /// in ScheduleDAGInstrs.h 296 class MachineSchedStrategy { 297 public: 298 virtual ~MachineSchedStrategy() {} 299 300 /// Initialize the strategy after building the DAG for a new region. 301 virtual void initialize(ScheduleDAGMI *DAG) = 0; 302 303 /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to 304 /// schedule the node at the top of the unscheduled region. Otherwise it will 305 /// be scheduled at the bottom. 306 virtual SUnit *pickNode(bool &IsTopNode) = 0; 307 308 /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled a node. 309 virtual void schedNode(SUnit *SU, bool IsTopNode) = 0; 310 311 /// When all predecessor dependencies have been resolved, free this node for 312 /// top-down scheduling. 313 virtual void releaseTopNode(SUnit *SU) = 0; 314 /// When all successor dependencies have been resolved, free this node for 315 /// bottom-up scheduling. 316 virtual void releaseBottomNode(SUnit *SU) = 0; 317 }; 318 } // namespace 319 320 //===----------------------------------------------------------------------===// 321 // ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals 322 // preservation. 323 //===----------------------------------------------------------------------===// 324 325 namespace { 326 /// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules 327 /// machine instructions while updating LiveIntervals. 328 class ScheduleDAGMI : public ScheduleDAGInstrs { 329 AliasAnalysis *AA; 330 RegisterClassInfo *RegClassInfo; 331 MachineSchedStrategy *SchedImpl; 332 333 MachineBasicBlock::iterator LiveRegionEnd; 334 335 /// Register pressure in this region computed by buildSchedGraph. 336 IntervalPressure RegPressure; 337 RegPressureTracker RPTracker; 338 339 /// List of pressure sets that exceed the target's pressure limit before 340 /// scheduling, listed in increasing set ID order. Each pressure set is paired 341 /// with its max pressure in the currently scheduled regions. 342 std::vector<PressureElement> RegionCriticalPSets; 343 344 /// The top of the unscheduled zone. 345 MachineBasicBlock::iterator CurrentTop; 346 IntervalPressure TopPressure; 347 RegPressureTracker TopRPTracker; 348 349 /// The bottom of the unscheduled zone. 350 MachineBasicBlock::iterator CurrentBottom; 351 IntervalPressure BotPressure; 352 RegPressureTracker BotRPTracker; 353 354 /// The number of instructions scheduled so far. Used to cut off the 355 /// scheduler at the point determined by misched-cutoff. 356 unsigned NumInstrsScheduled; 357 public: 358 ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S): 359 ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS), 360 AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S), 361 RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure), 362 CurrentBottom(), BotRPTracker(BotPressure), NumInstrsScheduled(0) {} 363 364 ~ScheduleDAGMI() { 365 delete SchedImpl; 366 } 367 368 MachineBasicBlock::iterator top() const { return CurrentTop; } 369 MachineBasicBlock::iterator bottom() const { return CurrentBottom; } 370 371 /// Implement the ScheduleDAGInstrs interface for handling the next scheduling 372 /// region. This covers all instructions in a block, while schedule() may only 373 /// cover a subset. 374 void enterRegion(MachineBasicBlock *bb, 375 MachineBasicBlock::iterator begin, 376 MachineBasicBlock::iterator end, 377 unsigned endcount); 378 379 /// Implement ScheduleDAGInstrs interface for scheduling a sequence of 380 /// reorderable instructions. 381 void schedule(); 382 383 /// Get current register pressure for the top scheduled instructions. 384 const IntervalPressure &getTopPressure() const { return TopPressure; } 385 const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; } 386 387 /// Get current register pressure for the bottom scheduled instructions. 388 const IntervalPressure &getBotPressure() const { return BotPressure; } 389 const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; } 390 391 /// Get register pressure for the entire scheduling region before scheduling. 392 const IntervalPressure &getRegPressure() const { return RegPressure; } 393 394 const std::vector<PressureElement> &getRegionCriticalPSets() const { 395 return RegionCriticalPSets; 396 } 397 398 /// getIssueWidth - Return the max instructions per scheduling group. 399 /// 400 unsigned getIssueWidth() const { 401 return InstrItins ? InstrItins->Props.IssueWidth : 1; 402 } 403 404 protected: 405 void initRegPressure(); 406 void updateScheduledPressure(std::vector<unsigned> NewMaxPressure); 407 408 void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos); 409 bool checkSchedLimit(); 410 411 void releaseRoots(); 412 413 void releaseSucc(SUnit *SU, SDep *SuccEdge); 414 void releaseSuccessors(SUnit *SU); 415 void releasePred(SUnit *SU, SDep *PredEdge); 416 void releasePredecessors(SUnit *SU); 417 418 void placeDebugValues(); 419 }; 420 } // namespace 421 422 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When 423 /// NumPredsLeft reaches zero, release the successor node. 424 /// 425 /// FIXME: Adjust SuccSU height based on MinLatency. 426 void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) { 427 SUnit *SuccSU = SuccEdge->getSUnit(); 428 429 #ifndef NDEBUG 430 if (SuccSU->NumPredsLeft == 0) { 431 dbgs() << "*** Scheduling failed! ***\n"; 432 SuccSU->dump(this); 433 dbgs() << " has been released too many times!\n"; 434 llvm_unreachable(0); 435 } 436 #endif 437 --SuccSU->NumPredsLeft; 438 if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) 439 SchedImpl->releaseTopNode(SuccSU); 440 } 441 442 /// releaseSuccessors - Call releaseSucc on each of SU's successors. 443 void ScheduleDAGMI::releaseSuccessors(SUnit *SU) { 444 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 445 I != E; ++I) { 446 releaseSucc(SU, &*I); 447 } 448 } 449 450 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When 451 /// NumSuccsLeft reaches zero, release the predecessor node. 452 /// 453 /// FIXME: Adjust PredSU height based on MinLatency. 454 void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) { 455 SUnit *PredSU = PredEdge->getSUnit(); 456 457 #ifndef NDEBUG 458 if (PredSU->NumSuccsLeft == 0) { 459 dbgs() << "*** Scheduling failed! ***\n"; 460 PredSU->dump(this); 461 dbgs() << " has been released too many times!\n"; 462 llvm_unreachable(0); 463 } 464 #endif 465 --PredSU->NumSuccsLeft; 466 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) 467 SchedImpl->releaseBottomNode(PredSU); 468 } 469 470 /// releasePredecessors - Call releasePred on each of SU's predecessors. 471 void ScheduleDAGMI::releasePredecessors(SUnit *SU) { 472 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 473 I != E; ++I) { 474 releasePred(SU, &*I); 475 } 476 } 477 478 void ScheduleDAGMI::moveInstruction(MachineInstr *MI, 479 MachineBasicBlock::iterator InsertPos) { 480 // Advance RegionBegin if the first instruction moves down. 481 if (&*RegionBegin == MI) 482 ++RegionBegin; 483 484 // Update the instruction stream. 485 BB->splice(InsertPos, BB, MI); 486 487 // Update LiveIntervals 488 LIS->handleMove(MI); 489 490 // Recede RegionBegin if an instruction moves above the first. 491 if (RegionBegin == InsertPos) 492 RegionBegin = MI; 493 } 494 495 bool ScheduleDAGMI::checkSchedLimit() { 496 #ifndef NDEBUG 497 if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) { 498 CurrentTop = CurrentBottom; 499 return false; 500 } 501 ++NumInstrsScheduled; 502 #endif 503 return true; 504 } 505 506 /// enterRegion - Called back from MachineScheduler::runOnMachineFunction after 507 /// crossing a scheduling boundary. [begin, end) includes all instructions in 508 /// the region, including the boundary itself and single-instruction regions 509 /// that don't get scheduled. 510 void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb, 511 MachineBasicBlock::iterator begin, 512 MachineBasicBlock::iterator end, 513 unsigned endcount) 514 { 515 ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount); 516 517 // For convenience remember the end of the liveness region. 518 LiveRegionEnd = 519 (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd); 520 } 521 522 // Setup the register pressure trackers for the top scheduled top and bottom 523 // scheduled regions. 524 void ScheduleDAGMI::initRegPressure() { 525 TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin); 526 BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd); 527 528 // Close the RPTracker to finalize live ins. 529 RPTracker.closeRegion(); 530 531 DEBUG(RPTracker.getPressure().dump(TRI)); 532 533 // Initialize the live ins and live outs. 534 TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs); 535 BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs); 536 537 // Close one end of the tracker so we can call 538 // getMaxUpward/DownwardPressureDelta before advancing across any 539 // instructions. This converts currently live regs into live ins/outs. 540 TopRPTracker.closeTop(); 541 BotRPTracker.closeBottom(); 542 543 // Account for liveness generated by the region boundary. 544 if (LiveRegionEnd != RegionEnd) 545 BotRPTracker.recede(); 546 547 assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom"); 548 549 // Cache the list of excess pressure sets in this region. This will also track 550 // the max pressure in the scheduled code for these sets. 551 RegionCriticalPSets.clear(); 552 std::vector<unsigned> RegionPressure = RPTracker.getPressure().MaxSetPressure; 553 for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) { 554 unsigned Limit = TRI->getRegPressureSetLimit(i); 555 if (RegionPressure[i] > Limit) 556 RegionCriticalPSets.push_back(PressureElement(i, 0)); 557 } 558 DEBUG(dbgs() << "Excess PSets: "; 559 for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i) 560 dbgs() << TRI->getRegPressureSetName( 561 RegionCriticalPSets[i].PSetID) << " "; 562 dbgs() << "\n"); 563 } 564 565 // FIXME: When the pressure tracker deals in pressure differences then we won't 566 // iterate over all RegionCriticalPSets[i]. 567 void ScheduleDAGMI:: 568 updateScheduledPressure(std::vector<unsigned> NewMaxPressure) { 569 for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) { 570 unsigned ID = RegionCriticalPSets[i].PSetID; 571 int &MaxUnits = RegionCriticalPSets[i].UnitIncrease; 572 if ((int)NewMaxPressure[ID] > MaxUnits) 573 MaxUnits = NewMaxPressure[ID]; 574 } 575 } 576 577 // Release all DAG roots for scheduling. 578 void ScheduleDAGMI::releaseRoots() { 579 SmallVector<SUnit*, 16> BotRoots; 580 581 for (std::vector<SUnit>::iterator 582 I = SUnits.begin(), E = SUnits.end(); I != E; ++I) { 583 // A SUnit is ready to top schedule if it has no predecessors. 584 if (I->Preds.empty()) 585 SchedImpl->releaseTopNode(&(*I)); 586 // A SUnit is ready to bottom schedule if it has no successors. 587 if (I->Succs.empty()) 588 BotRoots.push_back(&(*I)); 589 } 590 // Release bottom roots in reverse order so the higher priority nodes appear 591 // first. This is more natural and slightly more efficient. 592 for (SmallVectorImpl<SUnit*>::const_reverse_iterator 593 I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) 594 SchedImpl->releaseBottomNode(*I); 595 } 596 597 /// schedule - Called back from MachineScheduler::runOnMachineFunction 598 /// after setting up the current scheduling region. [RegionBegin, RegionEnd) 599 /// only includes instructions that have DAG nodes, not scheduling boundaries. 600 void ScheduleDAGMI::schedule() { 601 // Initialize the register pressure tracker used by buildSchedGraph. 602 RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd); 603 604 // Account for liveness generate by the region boundary. 605 if (LiveRegionEnd != RegionEnd) 606 RPTracker.recede(); 607 608 // Build the DAG, and compute current register pressure. 609 buildSchedGraph(AA, &RPTracker); 610 611 // Initialize top/bottom trackers after computing region pressure. 612 initRegPressure(); 613 614 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) 615 SUnits[su].dumpAll(this)); 616 617 if (ViewMISchedDAGs) viewGraph(); 618 619 SchedImpl->initialize(this); 620 621 // Release edges from the special Entry node or to the special Exit node. 622 releaseSuccessors(&EntrySU); 623 releasePredecessors(&ExitSU); 624 625 // Release all DAG roots for scheduling. 626 releaseRoots(); 627 628 CurrentTop = nextIfDebug(RegionBegin, RegionEnd); 629 CurrentBottom = RegionEnd; 630 bool IsTopNode = false; 631 while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) { 632 if (!checkSchedLimit()) 633 break; 634 635 // Move the instruction to its new location in the instruction stream. 636 MachineInstr *MI = SU->getInstr(); 637 638 if (IsTopNode) { 639 assert(SU->isTopReady() && "node still has unscheduled dependencies"); 640 if (&*CurrentTop == MI) 641 CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom); 642 else { 643 moveInstruction(MI, CurrentTop); 644 TopRPTracker.setPos(MI); 645 } 646 647 // Update top scheduled pressure. 648 TopRPTracker.advance(); 649 assert(TopRPTracker.getPos() == CurrentTop && "out of sync"); 650 updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure); 651 652 // Release dependent instructions for scheduling. 653 releaseSuccessors(SU); 654 } 655 else { 656 assert(SU->isBottomReady() && "node still has unscheduled dependencies"); 657 MachineBasicBlock::iterator priorII = 658 priorNonDebug(CurrentBottom, CurrentTop); 659 if (&*priorII == MI) 660 CurrentBottom = priorII; 661 else { 662 if (&*CurrentTop == MI) { 663 CurrentTop = nextIfDebug(++CurrentTop, priorII); 664 TopRPTracker.setPos(CurrentTop); 665 } 666 moveInstruction(MI, CurrentBottom); 667 CurrentBottom = MI; 668 } 669 // Update bottom scheduled pressure. 670 BotRPTracker.recede(); 671 assert(BotRPTracker.getPos() == CurrentBottom && "out of sync"); 672 updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure); 673 674 // Release dependent instructions for scheduling. 675 releasePredecessors(SU); 676 } 677 SU->isScheduled = true; 678 SchedImpl->schedNode(SU, IsTopNode); 679 } 680 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone."); 681 682 placeDebugValues(); 683 } 684 685 /// Reinsert any remaining debug_values, just like the PostRA scheduler. 686 void ScheduleDAGMI::placeDebugValues() { 687 // If first instruction was a DBG_VALUE then put it back. 688 if (FirstDbgValue) { 689 BB->splice(RegionBegin, BB, FirstDbgValue); 690 RegionBegin = FirstDbgValue; 691 } 692 693 for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator 694 DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) { 695 std::pair<MachineInstr *, MachineInstr *> P = *prior(DI); 696 MachineInstr *DbgValue = P.first; 697 MachineBasicBlock::iterator OrigPrevMI = P.second; 698 BB->splice(++OrigPrevMI, BB, DbgValue); 699 if (OrigPrevMI == llvm::prior(RegionEnd)) 700 RegionEnd = DbgValue; 701 } 702 DbgValues.clear(); 703 FirstDbgValue = NULL; 704 } 705 706 //===----------------------------------------------------------------------===// 707 // ConvergingScheduler - Implementation of the standard MachineSchedStrategy. 708 //===----------------------------------------------------------------------===// 709 710 namespace { 711 /// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience 712 /// methods for pushing and removing nodes. ReadyQueue's are uniquely identified 713 /// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in. 714 class ReadyQueue { 715 unsigned ID; 716 std::string Name; 717 std::vector<SUnit*> Queue; 718 719 public: 720 ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {} 721 722 unsigned getID() const { return ID; } 723 724 StringRef getName() const { return Name; } 725 726 // SU is in this queue if it's NodeQueueID is a superset of this ID. 727 bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); } 728 729 bool empty() const { return Queue.empty(); } 730 731 unsigned size() const { return Queue.size(); } 732 733 typedef std::vector<SUnit*>::iterator iterator; 734 735 iterator begin() { return Queue.begin(); } 736 737 iterator end() { return Queue.end(); } 738 739 iterator find(SUnit *SU) { 740 return std::find(Queue.begin(), Queue.end(), SU); 741 } 742 743 void push(SUnit *SU) { 744 Queue.push_back(SU); 745 SU->NodeQueueId |= ID; 746 } 747 748 void remove(iterator I) { 749 (*I)->NodeQueueId &= ~ID; 750 *I = Queue.back(); 751 Queue.pop_back(); 752 } 753 754 void dump() { 755 dbgs() << Name << ": "; 756 for (unsigned i = 0, e = Queue.size(); i < e; ++i) 757 dbgs() << Queue[i]->NodeNum << " "; 758 dbgs() << "\n"; 759 } 760 }; 761 762 /// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance 763 /// the schedule. 764 class ConvergingScheduler : public MachineSchedStrategy { 765 766 /// Store the state used by ConvergingScheduler heuristics, required for the 767 /// lifetime of one invocation of pickNode(). 768 struct SchedCandidate { 769 // The best SUnit candidate. 770 SUnit *SU; 771 772 // Register pressure values for the best candidate. 773 RegPressureDelta RPDelta; 774 775 SchedCandidate(): SU(NULL) {} 776 }; 777 /// Represent the type of SchedCandidate found within a single queue. 778 enum CandResult { 779 NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure }; 780 781 /// Each Scheduling boundary is associated with ready queues. It tracks the 782 /// current cycle in whichever direction at has moved, and maintains the state 783 /// of "hazards" and other interlocks at the current cycle. 784 struct SchedBoundary { 785 ReadyQueue Available; 786 ReadyQueue Pending; 787 bool CheckPending; 788 789 ScheduleHazardRecognizer *HazardRec; 790 791 unsigned CurrCycle; 792 unsigned IssueCount; 793 794 /// MinReadyCycle - Cycle of the soonest available instruction. 795 unsigned MinReadyCycle; 796 797 // Remember the greatest min operand latency. 798 unsigned MaxMinLatency; 799 800 /// Pending queues extend the ready queues with the same ID and the 801 /// PendingFlag set. 802 SchedBoundary(unsigned ID, const Twine &Name): 803 Available(ID, Name+".A"), 804 Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"), 805 CheckPending(false), HazardRec(0), CurrCycle(0), IssueCount(0), 806 MinReadyCycle(UINT_MAX), MaxMinLatency(0) {} 807 808 ~SchedBoundary() { delete HazardRec; } 809 810 bool isTop() const { 811 return Available.getID() == ConvergingScheduler::TopQID; 812 } 813 814 void releaseNode(SUnit *SU, unsigned ReadyCycle); 815 816 void bumpCycle(); 817 818 void bumpNode(SUnit *SU, unsigned IssueWidth); 819 820 void releasePending(); 821 822 void removeReady(SUnit *SU); 823 824 SUnit *pickOnlyChoice(); 825 }; 826 827 ScheduleDAGMI *DAG; 828 const TargetRegisterInfo *TRI; 829 830 // State of the top and bottom scheduled instruction boundaries. 831 SchedBoundary Top; 832 SchedBoundary Bot; 833 834 public: 835 /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both) 836 enum { 837 TopQID = 1, 838 BotQID = 2, 839 LogMaxQID = 2 840 }; 841 842 ConvergingScheduler(): 843 DAG(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {} 844 845 virtual void initialize(ScheduleDAGMI *dag); 846 847 virtual SUnit *pickNode(bool &IsTopNode); 848 849 virtual void schedNode(SUnit *SU, bool IsTopNode); 850 851 virtual void releaseTopNode(SUnit *SU); 852 853 virtual void releaseBottomNode(SUnit *SU); 854 855 protected: 856 SUnit *pickNodeBidrectional(bool &IsTopNode); 857 858 CandResult pickNodeFromQueue(ReadyQueue &Q, 859 const RegPressureTracker &RPTracker, 860 SchedCandidate &Candidate); 861 #ifndef NDEBUG 862 void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU, 863 PressureElement P = PressureElement()); 864 #endif 865 }; 866 } // namespace 867 868 void ConvergingScheduler::initialize(ScheduleDAGMI *dag) { 869 DAG = dag; 870 TRI = DAG->TRI; 871 872 // Initialize the HazardRecognizers. 873 const TargetMachine &TM = DAG->MF.getTarget(); 874 const InstrItineraryData *Itin = TM.getInstrItineraryData(); 875 Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); 876 Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); 877 878 assert((!ForceTopDown || !ForceBottomUp) && 879 "-misched-topdown incompatible with -misched-bottomup"); 880 } 881 882 void ConvergingScheduler::releaseTopNode(SUnit *SU) { 883 if (SU->isScheduled) 884 return; 885 886 for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 887 I != E; ++I) { 888 unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle; 889 unsigned Latency = 890 DAG->computeOperandLatency(I->getSUnit(), SU, *I, /*FindMin=*/true); 891 #ifndef NDEBUG 892 Top.MaxMinLatency = std::max(Latency, Top.MaxMinLatency); 893 #endif 894 if (SU->TopReadyCycle < PredReadyCycle + Latency) 895 SU->TopReadyCycle = PredReadyCycle + Latency; 896 } 897 Top.releaseNode(SU, SU->TopReadyCycle); 898 } 899 900 void ConvergingScheduler::releaseBottomNode(SUnit *SU) { 901 if (SU->isScheduled) 902 return; 903 904 assert(SU->getInstr() && "Scheduled SUnit must have instr"); 905 906 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 907 I != E; ++I) { 908 unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle; 909 unsigned Latency = 910 DAG->computeOperandLatency(SU, I->getSUnit(), *I, /*FindMin=*/true); 911 #ifndef NDEBUG 912 Bot.MaxMinLatency = std::max(Latency, Bot.MaxMinLatency); 913 #endif 914 if (SU->BotReadyCycle < SuccReadyCycle + Latency) 915 SU->BotReadyCycle = SuccReadyCycle + Latency; 916 } 917 Bot.releaseNode(SU, SU->BotReadyCycle); 918 } 919 920 void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU, 921 unsigned ReadyCycle) { 922 if (ReadyCycle < MinReadyCycle) 923 MinReadyCycle = ReadyCycle; 924 925 // Check for interlocks first. For the purpose of other heuristics, an 926 // instruction that cannot issue appears as if it's not in the ReadyQueue. 927 if (ReadyCycle > CurrCycle 928 || (HazardRec->isEnabled() && (HazardRec->getHazardType(SU) 929 != ScheduleHazardRecognizer::NoHazard))) 930 Pending.push(SU); 931 else 932 Available.push(SU); 933 } 934 935 /// Move the boundary of scheduled code by one cycle. 936 void ConvergingScheduler::SchedBoundary::bumpCycle() { 937 IssueCount = 0; 938 939 assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized"); 940 unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle); 941 942 if (!HazardRec->isEnabled()) { 943 // Bypass HazardRec virtual calls. 944 CurrCycle = NextCycle; 945 } 946 else { 947 // Bypass getHazardType calls in case of long latency. 948 for (; CurrCycle != NextCycle; ++CurrCycle) { 949 if (isTop()) 950 HazardRec->AdvanceCycle(); 951 else 952 HazardRec->RecedeCycle(); 953 } 954 } 955 CheckPending = true; 956 957 DEBUG(dbgs() << "*** " << Available.getName() << " cycle " 958 << CurrCycle << '\n'); 959 } 960 961 /// Move the boundary of scheduled code by one SUnit. 962 void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU, 963 unsigned IssueWidth) { 964 // Update the reservation table. 965 if (HazardRec->isEnabled()) { 966 if (!isTop() && SU->isCall) { 967 // Calls are scheduled with their preceding instructions. For bottom-up 968 // scheduling, clear the pipeline state before emitting. 969 HazardRec->Reset(); 970 } 971 HazardRec->EmitInstruction(SU); 972 } 973 // Check the instruction group size limit. 974 ++IssueCount; 975 if (IssueCount == IssueWidth) { 976 DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n'); 977 bumpCycle(); 978 } 979 } 980 981 /// Release pending ready nodes in to the available queue. This makes them 982 /// visible to heuristics. 983 void ConvergingScheduler::SchedBoundary::releasePending() { 984 // If the available queue is empty, it is safe to reset MinReadyCycle. 985 if (Available.empty()) 986 MinReadyCycle = UINT_MAX; 987 988 // Check to see if any of the pending instructions are ready to issue. If 989 // so, add them to the available queue. 990 for (unsigned i = 0, e = Pending.size(); i != e; ++i) { 991 SUnit *SU = *(Pending.begin()+i); 992 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle; 993 994 if (ReadyCycle < MinReadyCycle) 995 MinReadyCycle = ReadyCycle; 996 997 if (ReadyCycle > CurrCycle) 998 continue; 999 1000 if (HazardRec->isEnabled() 1001 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) 1002 continue; 1003 1004 Available.push(SU); 1005 Pending.remove(Pending.begin()+i); 1006 --i; --e; 1007 } 1008 CheckPending = false; 1009 } 1010 1011 /// Remove SU from the ready set for this boundary. 1012 void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) { 1013 if (Available.isInQueue(SU)) 1014 Available.remove(Available.find(SU)); 1015 else { 1016 assert(Pending.isInQueue(SU) && "bad ready count"); 1017 Pending.remove(Pending.find(SU)); 1018 } 1019 } 1020 1021 /// If this queue only has one ready candidate, return it. As a side effect, 1022 /// advance the cycle until at least one node is ready. If multiple instructions 1023 /// are ready, return NULL. 1024 SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() { 1025 if (CheckPending) 1026 releasePending(); 1027 1028 for (unsigned i = 0; Available.empty(); ++i) { 1029 assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) && 1030 "permanent hazard"); (void)i; 1031 bumpCycle(); 1032 releasePending(); 1033 } 1034 if (Available.size() == 1) 1035 return *Available.begin(); 1036 return NULL; 1037 } 1038 1039 #ifndef NDEBUG 1040 void ConvergingScheduler::traceCandidate(const char *Label, const ReadyQueue &Q, 1041 SUnit *SU, PressureElement P) { 1042 dbgs() << Label << " " << Q.getName() << " "; 1043 if (P.isValid()) 1044 dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease 1045 << " "; 1046 else 1047 dbgs() << " "; 1048 SU->dump(DAG); 1049 } 1050 #endif 1051 1052 /// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is 1053 /// more desirable than RHS from scheduling standpoint. 1054 static bool compareRPDelta(const RegPressureDelta &LHS, 1055 const RegPressureDelta &RHS) { 1056 // Compare each component of pressure in decreasing order of importance 1057 // without checking if any are valid. Invalid PressureElements are assumed to 1058 // have UnitIncrease==0, so are neutral. 1059 1060 // Avoid increasing the max critical pressure in the scheduled region. 1061 if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) 1062 return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease; 1063 1064 // Avoid increasing the max critical pressure in the scheduled region. 1065 if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) 1066 return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease; 1067 1068 // Avoid increasing the max pressure of the entire region. 1069 if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) 1070 return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease; 1071 1072 return false; 1073 } 1074 1075 /// Pick the best candidate from the top queue. 1076 /// 1077 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 1078 /// DAG building. To adjust for the current scheduling location we need to 1079 /// maintain the number of vreg uses remaining to be top-scheduled. 1080 ConvergingScheduler::CandResult ConvergingScheduler:: 1081 pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker, 1082 SchedCandidate &Candidate) { 1083 DEBUG(Q.dump()); 1084 1085 // getMaxPressureDelta temporarily modifies the tracker. 1086 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 1087 1088 // BestSU remains NULL if no top candidates beat the best existing candidate. 1089 CandResult FoundCandidate = NoCand; 1090 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) { 1091 RegPressureDelta RPDelta; 1092 TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta, 1093 DAG->getRegionCriticalPSets(), 1094 DAG->getRegPressure().MaxSetPressure); 1095 1096 // Initialize the candidate if needed. 1097 if (!Candidate.SU) { 1098 Candidate.SU = *I; 1099 Candidate.RPDelta = RPDelta; 1100 FoundCandidate = NodeOrder; 1101 continue; 1102 } 1103 // Avoid exceeding the target's limit. 1104 if (RPDelta.Excess.UnitIncrease < Candidate.RPDelta.Excess.UnitIncrease) { 1105 DEBUG(traceCandidate("ECAND", Q, *I, RPDelta.Excess)); 1106 Candidate.SU = *I; 1107 Candidate.RPDelta = RPDelta; 1108 FoundCandidate = SingleExcess; 1109 continue; 1110 } 1111 if (RPDelta.Excess.UnitIncrease > Candidate.RPDelta.Excess.UnitIncrease) 1112 continue; 1113 if (FoundCandidate == SingleExcess) 1114 FoundCandidate = MultiPressure; 1115 1116 // Avoid increasing the max critical pressure in the scheduled region. 1117 if (RPDelta.CriticalMax.UnitIncrease 1118 < Candidate.RPDelta.CriticalMax.UnitIncrease) { 1119 DEBUG(traceCandidate("PCAND", Q, *I, RPDelta.CriticalMax)); 1120 Candidate.SU = *I; 1121 Candidate.RPDelta = RPDelta; 1122 FoundCandidate = SingleCritical; 1123 continue; 1124 } 1125 if (RPDelta.CriticalMax.UnitIncrease 1126 > Candidate.RPDelta.CriticalMax.UnitIncrease) 1127 continue; 1128 if (FoundCandidate == SingleCritical) 1129 FoundCandidate = MultiPressure; 1130 1131 // Avoid increasing the max pressure of the entire region. 1132 if (RPDelta.CurrentMax.UnitIncrease 1133 < Candidate.RPDelta.CurrentMax.UnitIncrease) { 1134 DEBUG(traceCandidate("MCAND", Q, *I, RPDelta.CurrentMax)); 1135 Candidate.SU = *I; 1136 Candidate.RPDelta = RPDelta; 1137 FoundCandidate = SingleMax; 1138 continue; 1139 } 1140 if (RPDelta.CurrentMax.UnitIncrease 1141 > Candidate.RPDelta.CurrentMax.UnitIncrease) 1142 continue; 1143 if (FoundCandidate == SingleMax) 1144 FoundCandidate = MultiPressure; 1145 1146 // Fall through to original instruction order. 1147 // Only consider node order if Candidate was chosen from this Q. 1148 if (FoundCandidate == NoCand) 1149 continue; 1150 1151 if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum) 1152 || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) { 1153 DEBUG(traceCandidate("NCAND", Q, *I)); 1154 Candidate.SU = *I; 1155 Candidate.RPDelta = RPDelta; 1156 FoundCandidate = NodeOrder; 1157 } 1158 } 1159 return FoundCandidate; 1160 } 1161 1162 /// Pick the best candidate node from either the top or bottom queue. 1163 SUnit *ConvergingScheduler::pickNodeBidrectional(bool &IsTopNode) { 1164 // Schedule as far as possible in the direction of no choice. This is most 1165 // efficient, but also provides the best heuristics for CriticalPSets. 1166 if (SUnit *SU = Bot.pickOnlyChoice()) { 1167 IsTopNode = false; 1168 return SU; 1169 } 1170 if (SUnit *SU = Top.pickOnlyChoice()) { 1171 IsTopNode = true; 1172 return SU; 1173 } 1174 SchedCandidate BotCand; 1175 // Prefer bottom scheduling when heuristics are silent. 1176 CandResult BotResult = pickNodeFromQueue(Bot.Available, 1177 DAG->getBotRPTracker(), BotCand); 1178 assert(BotResult != NoCand && "failed to find the first candidate"); 1179 1180 // If either Q has a single candidate that provides the least increase in 1181 // Excess pressure, we can immediately schedule from that Q. 1182 // 1183 // RegionCriticalPSets summarizes the pressure within the scheduled region and 1184 // affects picking from either Q. If scheduling in one direction must 1185 // increase pressure for one of the excess PSets, then schedule in that 1186 // direction first to provide more freedom in the other direction. 1187 if (BotResult == SingleExcess || BotResult == SingleCritical) { 1188 IsTopNode = false; 1189 return BotCand.SU; 1190 } 1191 // Check if the top Q has a better candidate. 1192 SchedCandidate TopCand; 1193 CandResult TopResult = pickNodeFromQueue(Top.Available, 1194 DAG->getTopRPTracker(), TopCand); 1195 assert(TopResult != NoCand && "failed to find the first candidate"); 1196 1197 if (TopResult == SingleExcess || TopResult == SingleCritical) { 1198 IsTopNode = true; 1199 return TopCand.SU; 1200 } 1201 // If either Q has a single candidate that minimizes pressure above the 1202 // original region's pressure pick it. 1203 if (BotResult == SingleMax) { 1204 IsTopNode = false; 1205 return BotCand.SU; 1206 } 1207 if (TopResult == SingleMax) { 1208 IsTopNode = true; 1209 return TopCand.SU; 1210 } 1211 // Check for a salient pressure difference and pick the best from either side. 1212 if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) { 1213 IsTopNode = true; 1214 return TopCand.SU; 1215 } 1216 // Otherwise prefer the bottom candidate in node order. 1217 IsTopNode = false; 1218 return BotCand.SU; 1219 } 1220 1221 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy. 1222 SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) { 1223 if (DAG->top() == DAG->bottom()) { 1224 assert(Top.Available.empty() && Top.Pending.empty() && 1225 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage"); 1226 return NULL; 1227 } 1228 SUnit *SU; 1229 if (ForceTopDown) { 1230 SU = Top.pickOnlyChoice(); 1231 if (!SU) { 1232 SchedCandidate TopCand; 1233 CandResult TopResult = 1234 pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand); 1235 assert(TopResult != NoCand && "failed to find the first candidate"); 1236 (void)TopResult; 1237 SU = TopCand.SU; 1238 } 1239 IsTopNode = true; 1240 } 1241 else if (ForceBottomUp) { 1242 SU = Bot.pickOnlyChoice(); 1243 if (!SU) { 1244 SchedCandidate BotCand; 1245 CandResult BotResult = 1246 pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand); 1247 assert(BotResult != NoCand && "failed to find the first candidate"); 1248 (void)BotResult; 1249 SU = BotCand.SU; 1250 } 1251 IsTopNode = false; 1252 } 1253 else { 1254 SU = pickNodeBidrectional(IsTopNode); 1255 } 1256 if (SU->isTopReady()) 1257 Top.removeReady(SU); 1258 if (SU->isBottomReady()) 1259 Bot.removeReady(SU); 1260 1261 DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom") 1262 << " Scheduling Instruction in cycle " 1263 << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n'; 1264 SU->dump(DAG)); 1265 return SU; 1266 } 1267 1268 /// Update the scheduler's state after scheduling a node. This is the same node 1269 /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update 1270 /// it's state based on the current cycle before MachineSchedStrategy does. 1271 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) { 1272 if (IsTopNode) { 1273 SU->TopReadyCycle = Top.CurrCycle; 1274 Top.bumpNode(SU, DAG->getIssueWidth()); 1275 } 1276 else { 1277 SU->BotReadyCycle = Bot.CurrCycle; 1278 Bot.bumpNode(SU, DAG->getIssueWidth()); 1279 } 1280 } 1281 1282 /// Create the standard converging machine scheduler. This will be used as the 1283 /// default scheduler if the target does not set a default. 1284 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) { 1285 assert((!ForceTopDown || !ForceBottomUp) && 1286 "-misched-topdown incompatible with -misched-bottomup"); 1287 return new ScheduleDAGMI(C, new ConvergingScheduler()); 1288 } 1289 static MachineSchedRegistry 1290 ConvergingSchedRegistry("converge", "Standard converging scheduler.", 1291 createConvergingSched); 1292 1293 //===----------------------------------------------------------------------===// 1294 // Machine Instruction Shuffler for Correctness Testing 1295 //===----------------------------------------------------------------------===// 1296 1297 #ifndef NDEBUG 1298 namespace { 1299 /// Apply a less-than relation on the node order, which corresponds to the 1300 /// instruction order prior to scheduling. IsReverse implements greater-than. 1301 template<bool IsReverse> 1302 struct SUnitOrder { 1303 bool operator()(SUnit *A, SUnit *B) const { 1304 if (IsReverse) 1305 return A->NodeNum > B->NodeNum; 1306 else 1307 return A->NodeNum < B->NodeNum; 1308 } 1309 }; 1310 1311 /// Reorder instructions as much as possible. 1312 class InstructionShuffler : public MachineSchedStrategy { 1313 bool IsAlternating; 1314 bool IsTopDown; 1315 1316 // Using a less-than relation (SUnitOrder<false>) for the TopQ priority 1317 // gives nodes with a higher number higher priority causing the latest 1318 // instructions to be scheduled first. 1319 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> > 1320 TopQ; 1321 // When scheduling bottom-up, use greater-than as the queue priority. 1322 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> > 1323 BottomQ; 1324 public: 1325 InstructionShuffler(bool alternate, bool topdown) 1326 : IsAlternating(alternate), IsTopDown(topdown) {} 1327 1328 virtual void initialize(ScheduleDAGMI *) { 1329 TopQ.clear(); 1330 BottomQ.clear(); 1331 } 1332 1333 /// Implement MachineSchedStrategy interface. 1334 /// ----------------------------------------- 1335 1336 virtual SUnit *pickNode(bool &IsTopNode) { 1337 SUnit *SU; 1338 if (IsTopDown) { 1339 do { 1340 if (TopQ.empty()) return NULL; 1341 SU = TopQ.top(); 1342 TopQ.pop(); 1343 } while (SU->isScheduled); 1344 IsTopNode = true; 1345 } 1346 else { 1347 do { 1348 if (BottomQ.empty()) return NULL; 1349 SU = BottomQ.top(); 1350 BottomQ.pop(); 1351 } while (SU->isScheduled); 1352 IsTopNode = false; 1353 } 1354 if (IsAlternating) 1355 IsTopDown = !IsTopDown; 1356 return SU; 1357 } 1358 1359 virtual void schedNode(SUnit *SU, bool IsTopNode) {} 1360 1361 virtual void releaseTopNode(SUnit *SU) { 1362 TopQ.push(SU); 1363 } 1364 virtual void releaseBottomNode(SUnit *SU) { 1365 BottomQ.push(SU); 1366 } 1367 }; 1368 } // namespace 1369 1370 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 1371 bool Alternate = !ForceTopDown && !ForceBottomUp; 1372 bool TopDown = !ForceBottomUp; 1373 assert((TopDown || !ForceTopDown) && 1374 "-misched-topdown incompatible with -misched-bottomup"); 1375 return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown)); 1376 } 1377 static MachineSchedRegistry ShufflerRegistry( 1378 "shuffle", "Shuffle machine instructions alternating directions", 1379 createInstructionShuffler); 1380 #endif // !NDEBUG 1381