//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
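// If the flag appears explicitly on the command line it wins in either
// direction; otherwise the subtarget hook decides (see runOnMachineFunction).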
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands
    /// have been issued, but their results are not ready yet (due to the
    /// latency of the operation). Once the operands become available, the
    /// instruction is added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// LiveRegs - One bit per physical register; true if the register is live.
    BitVector LiveRegs;

    /// The schedule. Null SUnit*'s represent noop instructions.
    std::vector<SUnit*> Sequence;

  public:
    SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
      AliasAnalysis *AA, const RegisterClassInfo&,
      TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// startBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void startBlock(MachineBasicBlock *BB);

    /// Initialize the scheduler state for the next scheduling region.
    virtual void enterRegion(MachineBasicBlock *bb,
                             MachineBasicBlock::iterator begin,
                             MachineBasicBlock::iterator end,
                             unsigned endcount);

    /// Notify that the scheduler has finished scheduling the current region.
    virtual void exitRegion();

    /// Schedule - Schedule the instruction range using list scheduling.
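    /// Anti-dependence breaking, when enabled, runs first; if it rewrites any
    /// registers, the dependence graph is rebuilt before list scheduling.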
    ///
    void schedule();

    void EmitSchedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// finishBlock - Clean up register live-range state.
    ///
    void finishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);

    void dumpSchedule() const;
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
  MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
  AliasAnalysis *AA, const RegisterClassInfo &RCI,
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
  SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), Topo(SUnits), AA(AA),
    LiveRegs(TRI->getNumRegs())
{
  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak =
    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned endcount) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
  ScheduleDAGInstrs::exitRegion();
}

/// dumpSchedule - dump the scheduled Sequence.
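/// Null entries in Sequence denote noops emitted to fill pipeline hazards.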
void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
    TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST =
      Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!ST.enablePostRAScheduler(PassConfig->getOptLevel(), AntiDepMode,
                                  CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
      ? TargetSubtargetInfo::ANTIDEP_ALL
      : ((EnableAntiDepBreaking == "critical")
         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
         : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
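      // The boundary instruction itself is never scheduled; Observe() below
      // only lets the anti-dep breaker account for it in its liveness state.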
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.enterRegion(MBB, I, Current, CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// startBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                          EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// finishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Start with no live registers.
  LiveRegs.reset();

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      LiveRegs.set(Reg);
      // Repeat, for all subregs.
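      // (FixupKills later tests liveness per sub-register, so each one must
      // be marked individually.)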
      for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        LiveRegs.set(Reg);
        // Repeat, for all subregs.
        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
          LiveRegs.set(*SubRegs);
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (LiveRegs.test(MO.getReg())) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
    if (LiveRegs.test(*SubRegs)) {
      MI->addOperand(MachineOperand::CreateReg(*SubRegs,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  BitVector killedRegs(TRI->getNumRegs());
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegMask())
        LiveRegs.clearBitsNotInMask(MO.getRegMask());
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      LiveRegs.reset(Reg);

      // Repeat for all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
        LiveRegs.reset(*SubRegs);
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.reset();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (!killedRegs.test(Reg)) {
        kill = true;
        // A register is not killed if any subregs are live...
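        // (e.g. on X86, a use of EAX is not a kill while AX is still live).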
        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
          if (LiveRegs.test(*SubRegs)) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed if it became
        // live in this instruction.
        if (kill)
          kill = !LiveRegs.test(Reg);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.set(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      LiveRegs.set(Reg);

      for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge), then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the PendingQueue.
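/// Nodes move from the PendingQueue to the AvailableQueue once the current
/// cycle reaches their depth, i.e. once their operands' latency has elapsed.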
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
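      // ... inform the hazard recognizer, and advance the cycle early if this
      // instruction exhausts the target's issue width.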
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem;
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If the first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = prior(RegionEnd);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    BB->splice(++OrigPrevMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}