//===----- PostRASchedulerList.cpp - list scheduler -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "llvm/CodeGen/Passes.h"
#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
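// When the flag appears on the command line it wins in either direction:
// -post-RA-scheduler=false disables the pass even if the target requests it,
// and -post-RA-scheduler=true enables it even if the target would not.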
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
class PostRAScheduler : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  RegisterClassInfo RegClassInfo;

public:
  static char ID;
  PostRAScheduler() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addRequired<MachineLoopInfo>();
    AU.addPreserved<MachineLoopInfo>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &Fn) override;
};
char PostRAScheduler::ID = 0;

class SchedulePostRATDList : public ScheduleDAGInstrs {
  /// AvailableQueue - The priority queue to use for the available SUnits.
  ///
  LatencyPriorityQueue AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
  AntiDepBreaker *AntiDepBreak;

  /// AA - AliasAnalysis for making memory reference queries.
  AliasAnalysis *AA;

  /// The schedule. Null SUnit*'s represent noop instructions.
  std::vector<SUnit*> Sequence;

  /// The index in BB of RegionEnd.
  ///
  /// This is the instruction number from the top of the current block, not
  /// the SlotIndex. It is only used by the AntiDepBreaker.
  unsigned EndIndex;

public:
  SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
      AliasAnalysis *AA, const RegisterClassInfo&,
      TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);

  ~SchedulePostRATDList();

  /// startBlock - Initialize register live-range state for scheduling in
  /// this block.
  ///
  void startBlock(MachineBasicBlock *BB) override;

  // Set the index of RegionEnd within the current BB.
  void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }

  /// Initialize the scheduler state for the next scheduling region.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Notify that the scheduler has finished scheduling the current region.
  void exitRegion() override;

  /// Schedule - Schedule the instruction range using list scheduling.
  ///
  void schedule() override;

  void EmitSchedule();

  /// Observe - Update liveness information to account for the current
  /// instruction, which will not be scheduled.
  ///
  void Observe(MachineInstr *MI, unsigned Count);

  /// finishBlock - Clean up register live-range state.
  ///
  void finishBlock() override;

private:
  void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
  void ListScheduleTopDown();

  void dumpSchedule() const;
  void emitNoop(unsigned CurCycle);
};
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
    MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
    AliasAnalysis *AA, const RegisterClassInfo &RCI,
    TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
    SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), AA(AA), EndIndex(0) {

  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak =
    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned regioninstrs) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  DEBUG({
    dbgs() << "*** Final schedule ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
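  // An explicit command-line setting always takes precedence; otherwise the
  // target decides, and may also request an anti-dependency breaking mode and
  // a set of critical-path register classes.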
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
    TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST =
      Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!ST.enablePostRAScheduler(PassConfig->getOptLevel(), AntiDepMode,
                                  CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
      ? TargetSubtargetInfo::ANTIDEP_ALL
      : ((EnableAntiDepBreaking == "critical")
         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
         : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = std::prev(I);
      --Count;
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.enterRegion(MBB, I, Current, CurrentCount - Count);
        Scheduler.setEndIndex(CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.setEndIndex(CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills.
    Scheduler.fixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
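  // Both carry per-block state (pipeline state, register liveness) that must
  // not leak in from the previous block.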
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                          EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge) then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// emitNoop - Add a noop to the current instruction sequence.
void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
  DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
  HazardRec->EmitNoop();
  Sequence.push_back(0);   // NULL here means noop
  ++NumNoops;
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
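    // A pending instruction becomes ready once CurCycle reaches its depth,
    // i.e. the latencies of all of its predecessors have elapsed. Ready
    // entries are removed by overwriting them with the queue's last element,
    // since the order of PendingQueue does not matter.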
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = 0, *NotPreferredSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        if (HazardRec->ShouldPreferAnother(CurSUnit)) {
          if (!NotPreferredSUnit) {
            // If this is the first non-preferred node for this cycle, then
            // record it and continue searching for a preferred node. If this
            // is not the first non-preferred node, then treat it as though
            // there had been a hazard.
            NotPreferredSUnit = CurSUnit;
            continue;
          }
        } else {
          FoundSUnit = CurSUnit;
          break;
        }
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // If we have a non-preferred node, push it back onto the available list.
    // If we did not find a preferred node, then schedule this first
    // non-preferred node.
    if (NotPreferredSUnit) {
      if (!FoundSUnit) {
        DEBUG(dbgs() << "*** Will schedule a non-preferred instruction...\n");
        FoundSUnit = NotPreferredSUnit;
      } else {
        AvailableQueue.push(NotPreferredSUnit);
      }

      NotPreferredSUnit = 0;
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // If we need to emit noops prior to this instruction, then do so.
      unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
      for (unsigned i = 0; i != NumPreNoops; ++i)
        emitNoop(CurCycle);

      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
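        // Unlike a stall, the noop occupies a real slot in the emitted
        // instruction stream; without interlocks nothing else would keep the
        // dependent instruction from issuing too early at run time.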
        emitNoop(CurCycle);
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert the instructions according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = std::prev(RegionEnd);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}