//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "RegisterClassInfo.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
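// For example, the pass can be forced on from the llc command line (an
// illustrative invocation, not a target-specific recommendation):
//   llc -O2 -post-RA-scheduler -break-anti-dependencies=critical foo.ll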
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// LiveRegs - One bit per physical register; true if the register is live.
    BitVector LiveRegs;

  public:
    SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
      AliasAnalysis *AA, const RegisterClassInfo&,
      TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
  MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
  AliasAnalysis *AA, const RegisterClassInfo &RCI,
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
  SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), Topo(SUnits), AA(AA),
    LiveRegs(TRI->getNumRegs())
{
  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);
  AntiDepBreak =
    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
    TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST =
      Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!ST.enablePostRAScheduler(PassConfig->getOptLevel(), AntiDepMode,
                                  CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
      ? TargetSubtargetInfo::ANTIDEP_ALL
      : ((EnableAntiDepBreaking == "critical")
         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
         : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);
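
  // Each block below is handled in phases: StartBlock() seeds the
  // live-register state, Run() list-schedules each region via Schedule(),
  // Observe() accounts for the boundary instructions that are never
  // scheduled, and FinishBlock()/FixupKills() tear down state and repair
  // kill flags.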
  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
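      // For now, take the conservative route and rebuild the entire graph
      // from scratch.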
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Start with no live registers.
  LiveRegs.reset();

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      LiveRegs.set(Reg);
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        LiveRegs.set(*Subreg);
    }
  }
  else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        LiveRegs.set(Reg);
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          LiveRegs.set(*Subreg);
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (LiveRegs.test(MO.getReg())) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (LiveRegs.test(*Subreg)) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}
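
// An illustrative example of the subregister case above (x86-style names):
// if an operand of %eax carries a stale kill flag while %al is still live
// below it, ToggleKillFlag clears the kill and adds "%al<imp-def>" so that
// later passes still see the subregister as defined.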
/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  BitVector killedRegs(TRI->getNumRegs());
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark the register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegMask())
        LiveRegs.clearBitsNotInMask(MO.getRegMask());
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      LiveRegs.reset(Reg);

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        LiveRegs.reset(*Subreg);
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.reset();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (!killedRegs.test(Reg)) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (LiveRegs.test(*Subreg)) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed if it became
        // live in this instruction.
        if (kill)
          kill = !LiveRegs.test(Reg);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.set(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      LiveRegs.set(Reg);

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        LiveRegs.set(*Subreg);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//
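
// In outline, the list-scheduling loop implemented below is:
//
//   while (PendingQueue or AvailableQueue is non-empty)
//     move pending SUnits whose depth <= CurCycle to AvailableQueue
//     pop the highest-priority available SUnit without a hazard
//     if one was found: schedule it and release its successors
//     else:             advance the cycle (a stall), or emit a noop when
//                       the target requires one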

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge) then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor's pending count reaches zero, add
/// it to the Pending queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
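    // (MinDepth records the smallest depth among still-pending nodes; it is
    // computed here but not otherwise consulted in this version of the loop.)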
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}