//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "post-RA-sched"

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
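// For example, passing "-post-RA-scheduler" explicitly on the command line
// forces the pass on (and "-post-RA-scheduler=false" forces it off) regardless
// of what the subtarget requests; see the getPosition() check in
// runOnMachineFunction below.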
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                       cl::desc("Enable scheduling after register allocation"),
                       cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &Fn) override;

    bool enablePostRAScheduler(
        const TargetSubtargetInfo &ST, CodeGenOpt::Level OptLevel,
        TargetSubtargetInfo::AntiDepBreakMode &Mode,
        TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const;
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands
    /// have been issued, but their results are not ready yet (due to the
    /// latency of the operation). Once the operands become available, the
    /// instruction is added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// The schedule. Null SUnit*'s represent noop instructions.
    std::vector<SUnit*> Sequence;

    /// The index in BB of RegionEnd.
    ///
    /// This is the instruction number from the top of the current block, not
    /// the SlotIndex. It is only used by the AntiDepBreaker.
    unsigned EndIndex;

  public:
    SchedulePostRATDList(
        MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
        AliasAnalysis *AA, const RegisterClassInfo&,
        TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
        SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// startBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void startBlock(MachineBasicBlock *BB) override;

    // Set the index of RegionEnd within the current BB.
    void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }

    /// Initialize the scheduler state for the next scheduling region.
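    /// This also clears the Sequence left over from the previous region
    /// before the new region's DAG is built.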
    void enterRegion(MachineBasicBlock *bb,
                     MachineBasicBlock::iterator begin,
                     MachineBasicBlock::iterator end,
                     unsigned regioninstrs) override;

    /// Notify that the scheduler has finished scheduling the current region.
    void exitRegion() override;

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void schedule() override;

    void EmitSchedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// finishBlock - Clean up register live-range state.
    ///
    void finishBlock() override;

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();

    void dumpSchedule() const;
    void emitNoop(unsigned CurCycle);
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
    MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
    AliasAnalysis *AA, const RegisterClassInfo &RCI,
    TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
    SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
    : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), AA(AA), EndIndex(0) {

  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak =
    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : nullptr));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned regioninstrs) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  DEBUG({
    dbgs() << "*** Final schedule ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
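/// Null entries in Sequence stand for noops and are printed as such.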
void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

bool PostRAScheduler::enablePostRAScheduler(
    const TargetSubtargetInfo &ST,
    CodeGenOpt::Level OptLevel,
    TargetSubtargetInfo::AntiDepBreakMode &Mode,
    TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
  Mode = ST.getAntiDepBreakMode();
  ST.getCriticalPathRCs(CriticalPathRCs);
  return ST.enablePostMachineScheduler() &&
         OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  if (skipOptnoneFunction(*Fn.getFunction()))
    return false;

  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
    TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST =
      Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!enablePostRAScheduler(ST, PassConfig->getOptLevel(),
                               AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
      ? TargetSubtargetInfo::ANTIDEP_ALL
      : ((EnableAntiDepBreaking == "critical")
         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
         : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin();) {
      MachineInstr *MI = std::prev(I);
      --Count;
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
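      // Note the walk is bottom-up: each boundary closes the region
      // [I, Current) above it, which is scheduled before the walk continues.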
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.enterRegion(MBB, I, Current, CurrentCount - Count);
        Scheduler.setEndIndex(CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.setEndIndex(CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills
    Scheduler.fixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                          EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
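/// Weak edges only decrement WeakPredsLeft and never make a node pending.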
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge), then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// emitNoop - Add a noop to the current instruction sequence.
void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
  DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
  HazardRec->EmitNoop();
  Sequence.push_back(nullptr);   // NULL here means noop
  ++NumNoops;
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
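  // CycleHasInsts records whether anything has issued in the current cycle,
  // so the bottom of the loop can tell a finished cycle from a stall.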
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        if (HazardRec->ShouldPreferAnother(CurSUnit)) {
          if (!NotPreferredSUnit) {
            // If this is the first non-preferred node for this cycle, then
            // record it and continue searching for a preferred node. If this
            // is not the first non-preferred node, then treat it as though
            // there had been a hazard.
            NotPreferredSUnit = CurSUnit;
            continue;
          }
        } else {
          FoundSUnit = CurSUnit;
          break;
        }
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // If we have a non-preferred node, push it back onto the available list.
    // If we did not find a preferred node, then schedule this first
    // non-preferred node.
    if (NotPreferredSUnit) {
      if (!FoundSUnit) {
        DEBUG(dbgs() << "*** Will schedule a non-preferred instruction...\n");
        FoundSUnit = NotPreferredSUnit;
      } else {
        AvailableQueue.push(NotPreferredSUnit);
      }

      NotPreferredSUnit = nullptr;
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // If we need to emit noops prior to this instruction, then do so.
      unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
      for (unsigned i = 0; i != NumPreNoops; ++i)
        emitNoop(CurCycle);

      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
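        // (A noop is only emitted below when nothing issued this cycle and at
        // least one candidate reported a NoopHazard.)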
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        emitNoop(CurCycle);
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = std::prev(RegionEnd);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}