//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "RegisterClassInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
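// For example, the pass can be forced on from the command line (the flag
// names match the cl::opt definitions below; the exact driver invocation is
// illustrative):
//   llc -O2 -post-RA-scheduler -break-anti-dependencies=critical input.ll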
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands
    /// have been issued, but their results are not ready yet (due to the
    /// latency of the operation). Once the operands become available, the
    /// instruction is added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// LiveRegs - One bit per physical register; a bit is set if the
    /// register is currently live.
    BitVector LiveRegs;

    /// The schedule. Null SUnit*'s represent noop instructions.
    std::vector<SUnit*> Sequence;

  public:
    SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
      AliasAnalysis *AA, const RegisterClassInfo&,
      TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// startBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void startBlock(MachineBasicBlock *BB);

    /// Initialize the scheduler state for the next scheduling region.
    virtual void enterRegion(MachineBasicBlock *bb,
                             MachineBasicBlock::iterator begin,
                             MachineBasicBlock::iterator end,
                             unsigned endcount);

    /// Notify that the scheduler has finished scheduling the current region.
    virtual void exitRegion();

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void schedule();

    void EmitSchedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// finishBlock - Clean up register live-range state.
    ///
    void finishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);

    void dumpSchedule() const;
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
  MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
  AliasAnalysis *AA, const RegisterClassInfo &RCI,
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
  SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), Topo(SUnits), AA(AA),
    LiveRegs(TRI->getNumRegs())
{
  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak =
    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned endcount) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
  ScheduleDAGInstrs::exitRegion();
}
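
// Callers drive the scheduler through the region protocol above; in outline
// (a sketch of what runOnMachineFunction below actually does):
//
//   Scheduler.startBlock(MBB);
//   for each scheduling region in MBB, walked bottom-up:
//     Scheduler.enterRegion(...);
//     Scheduler.schedule();
//     Scheduler.exitRegion();
//     Scheduler.EmitSchedule();
//     // boundary instructions are passed to Observe(), not scheduled
//   Scheduler.finishBlock();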

/// dumpSchedule - dump the scheduled Sequence.
void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
    TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST =
      Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!ST.enablePostRAScheduler(PassConfig->getOptLevel(), AntiDepMode,
                                  CriticalPathRCs))
      return false;
  }

  // Check for anti-dep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
      ? TargetSubtargetInfo::ANTIDEP_ALL
      : ((EnableAntiDepBreaking == "critical")
         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
         : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
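    // For example (illustrative), a block containing "A; B; CALL; C; D" is
    // split at the CALL: the region {C, D} is scheduled first, the CALL is
    // reported to Observe() rather than scheduled, and {A, B} is scheduled
    // last, so no instruction ever moves across the CALL.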
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.enterRegion(MBB, I, Current, CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                          EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}
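
// An anti-dependence example (illustrative pseudo machine code): in
//
//   %R0 = ADD %R1, %R2
//        = USE %R0<kill>
//   %R0 = SUB %R3, %R4
//
// the SUB is ordered after the USE only because both live ranges happen to
// occupy %R0. Renaming the second live range to a free register (say %R5)
// removes that ordering edge so the scheduler may hoist the SUB; this is
// what BreakAntiDependencies() is asked to do in schedule() above, after
// which the DAG is rebuilt.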

/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Start with no live registers.
  LiveRegs.reset();

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      LiveRegs.set(Reg);
      // Repeat, for all subregs.
      for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        LiveRegs.set(*Subreg);
    }
  }
  else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        LiveRegs.set(Reg);
        // Repeat, for all subregs.
        for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          LiveRegs.set(*Subreg);
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (LiveRegs.test(MO.getReg())) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const uint16_t *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (LiveRegs.test(*Subreg)) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}
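
// A kill flag marks the last use of a register's live range, e.g.
// "%R1 = LOAD ...; ... = ADD %R1<kill>, %R2" (register names here are
// illustrative). Reordering instructions can leave such a flag on an
// instruction that is no longer the last use; FixupKills below rescans the
// block bottom-up, tracking LiveRegs, to repair them.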

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  BitVector killedRegs(TRI->getNumRegs());
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are def'd but not used in this
    // instruction are now dead. Mark the register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegMask())
        LiveRegs.clearBitsNotInMask(MO.getRegMask());
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      LiveRegs.reset(Reg);

      // Repeat for all subregs.
      for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        LiveRegs.reset(*Subreg);
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.reset();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (!killedRegs.test(Reg)) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (LiveRegs.test(*Subreg)) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed if it became
        // live in this instruction.
        if (kill)
          kill = !LiveRegs.test(Reg);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.set(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      LiveRegs.set(Reg);

      for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        LiveRegs.set(*Subreg);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge) then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}
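
// The top-down list scheduler below implements, in outline (a sketch, not
// the exact control flow):
//
//   CurCycle = 0
//   while AvailableQueue or PendingQueue is non-empty:
//     move PendingQueue entries with depth <= CurCycle to AvailableQueue
//     pop candidates from AvailableQueue until one has no hazard at CurCycle
//     if a candidate was found:
//       append it to Sequence and release its successors
//     else:
//       advance CurCycle, counting a stall, or emit a noop if the target
//       requires one to avoid a fault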

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}
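
// For example (illustrative), if the region originally contained "B; A" and
// list scheduling produced Sequence = {A, NULL, B}, the splice loop below
// re-emits the region as "A; <target noop>; B", with the NULL entry expanded
// by TII->insertNoop().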

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = prior(RegionEnd);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    BB->splice(++OrigPrevMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}