//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "llvm/CodeGen/Passes.h"
#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
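//
// For example, assuming the llc driver (the flags themselves are defined
// just below), post-RA scheduling with critical-path anti-dependency
// breaking could be forced on with:
//   llc -post-RA-scheduler -break-anti-dependencies=critical foo.ll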
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands
    /// have been issued, but their results are not ready yet (due to the
    /// latency of the operation). Once the operands become available, the
    /// instruction is added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// LiveRegs - true if the register is live.
    BitVector LiveRegs;

    /// The schedule. Null SUnit*'s represent noop instructions.
    std::vector<SUnit*> Sequence;

  public:
    SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
      AliasAnalysis *AA, const RegisterClassInfo&,
      TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// startBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void startBlock(MachineBasicBlock *BB);

    /// Initialize the scheduler state for the next scheduling region.
    virtual void enterRegion(MachineBasicBlock *bb,
                             MachineBasicBlock::iterator begin,
                             MachineBasicBlock::iterator end,
                             unsigned endcount);

    /// Notify that the scheduler has finished scheduling the current region.
    virtual void exitRegion();

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void schedule();

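    /// EmitSchedule - Emit the machine code in scheduled order.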
    void EmitSchedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// finishBlock - Clean up register live-range state.
    ///
    void finishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);

    void dumpSchedule() const;
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
  MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
  AliasAnalysis *AA, const RegisterClassInfo &RCI,
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
  SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), AA(AA),
    LiveRegs(TRI->getNumRegs())
{
  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak =
    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned endcount) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

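/// runOnMachineFunction - Run post-RA scheduling over every basic block in
/// the function, splitting each block into scheduling regions at calls and
/// other scheduling boundaries and list-scheduling each region in turn.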
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
    TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST =
      Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!ST.enablePostRAScheduler(PassConfig->getOptLevel(), AntiDepMode,
                                  CriticalPathRCs))
      return false;
  }

  // Check for an anti-dependency breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
      ? TargetSubtargetInfo::ANTIDEP_ALL
      : ((EnableAntiDepBreaking == "critical")
         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
         : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.enterRegion(MBB, I, Current, CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kill flags.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                          EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

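//===----------------------------------------------------------------------===//
//  Kill-Flag Fixup
//
//  Scheduling can move an instruction past what used to be the last use of
//  a register, invalidating kill flags. The routines below rebuild the
//  flags by walking each block from bottom to top while tracking register
//  liveness in LiveRegs.
//===----------------------------------------------------------------------===//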
/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Start with no live registers.
  LiveRegs.reset();

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      LiveRegs.set(Reg);
      // Repeat, for all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        LiveRegs.set(Reg);
        // Repeat, for all subregs.
        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
          LiveRegs.set(*SubRegs);
      }
    }
  }
}

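/// ToggleKillFlag - Toggle a register operand kill flag. Other adjustments
/// may be made to the instruction if necessary. Return true if the operand
/// has been deleted, false if not.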
bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (LiveRegs.test(MO.getReg())) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  MachineInstrBuilder MIB(MF, MI);
  for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
    if (LiveRegs.test(*SubRegs)) {
      MIB.addReg(*SubRegs, RegState::ImplicitDefine);
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  BitVector killedRegs(TRI->getNumRegs());

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defined but not used in this
    // instruction are now dead. Mark the register and all of its subregs,
    // as they are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegMask())
        LiveRegs.clearBitsNotInMask(MO.getRegMask());
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      LiveRegs.reset(Reg);

      // Repeat for all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
        LiveRegs.reset(*SubRegs);
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.reset();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      bool kill = false;
      if (!killedRegs.test(Reg)) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
          if (LiveRegs.test(*SubRegs)) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed if it became
        // live in this instruction.
        if (kill)
          kill = !LiveRegs.test(Reg);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.set(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      LiveRegs.set(Reg);

      for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//
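//
// ListScheduleTopDown below drives the algorithm sketched in the file
// header: an SUnit whose predecessors have all been scheduled is released
// into AvailableQueue (via PendingQueue while its operands' latency is
// still pending); each cycle the highest-priority available node that the
// hazard recognizer accepts is issued, and otherwise we stall or emit a
// noop, depending on the target.
//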
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as follows:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge) then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back; otherwise schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If the first instruction was a DBG_VALUE, put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert the instructions according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = prior(RegionEnd);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    BB->splice(++OrigPrevMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}