//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "post-RA-sched"

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
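// For example:
//   llc -O2 -post-RA-scheduler foo.ll
//   llc -O2 -post-RA-scheduler -break-anti-dependencies=critical foo.ll
// (Both flags are cl::Hidden, so they do not appear in -help.)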
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &Fn) override;

    bool enablePostRAScheduler(
        const TargetSubtargetInfo &ST, CodeGenOpt::Level OptLevel,
        TargetSubtargetInfo::AntiDepBreakMode &Mode,
        TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const;
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// The schedule. Null SUnit*'s represent noop instructions.
    std::vector<SUnit*> Sequence;

    /// The index in BB of RegionEnd.
    ///
    /// This is the instruction number from the top of the current block, not
    /// the SlotIndex. It is only used by the AntiDepBreaker.
    unsigned EndIndex;

  public:
    SchedulePostRATDList(
        MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
        const RegisterClassInfo &,
        TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
        SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// startBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void startBlock(MachineBasicBlock *BB) override;

    // Set the index of RegionEnd within the current BB.
    void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }
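
    // The driver (PostRAScheduler::runOnMachineFunction) invokes the hooks
    // below once per scheduling region, in order: enterRegion(),
    // setEndIndex(), schedule(), exitRegion(), and finally EmitSchedule(),
    // which splices the scheduled instructions back into the block.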

    /// Initialize the scheduler state for the next scheduling region.
    void enterRegion(MachineBasicBlock *bb,
                     MachineBasicBlock::iterator begin,
                     MachineBasicBlock::iterator end,
                     unsigned regioninstrs) override;

    /// Notify that the scheduler has finished scheduling the current region.
    void exitRegion() override;

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void schedule() override;

    void EmitSchedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// finishBlock - Clean up register live-range state.
    ///
    void finishBlock() override;

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();

    void dumpSchedule() const;
    void emitNoop(unsigned CurCycle);
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
    MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
    const RegisterClassInfo &RCI,
    TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
    SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs)
    : ScheduleDAGInstrs(MF, &MLI, /*IsPostRA=*/true), AA(AA), EndIndex(0) {

  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins =
      TM.getSubtargetImpl()->getInstrItineraryData();
  HazardRec =
      TM.getSubtargetImpl()->getInstrInfo()->CreateTargetPostRAHazardRecognizer(
          InstrItins, this);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak =
      ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
       (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
       ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
        (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : nullptr));
}
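
// Note that the two anti-dep breakers differ in scope: the aggressive breaker
// considers all anti-dependencies in the region, while the critical-path
// breaker only rewrites those on the critical path. With ANTIDEP_NONE,
// AntiDepBreak stays null and schedule() skips the breaking step entirely.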

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned regioninstrs) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

bool PostRAScheduler::enablePostRAScheduler(
    const TargetSubtargetInfo &ST,
    CodeGenOpt::Level OptLevel,
    TargetSubtargetInfo::AntiDepBreakMode &Mode,
    TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
  Mode = ST.getAntiDepBreakMode();
  ST.getCriticalPathRCs(CriticalPathRCs);
  // Query the subtarget's post-RA list-scheduler hook (see the comment at the
  // top of this file), not the post-RA MachineScheduler hook.
  return ST.enablePostRAScheduler() &&
         OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  if (skipOptnoneFunction(*Fn.getFunction()))
    return false;

  TII = Fn.getSubtarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
      TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST =
        Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!enablePostRAScheduler(ST, PassConfig->getOptLevel(),
                               AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
                      ? TargetSubtargetInfo::ANTIDEP_ALL
                      : ((EnableAntiDepBreaking == "critical")
                             ? TargetSubtargetInfo::ANTIDEP_CRITICAL
                             : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = std::prev(I);
      --Count;
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
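      // Hitting a boundary closes the current region: the range [I, Current)
      // is scheduled as a unit, the boundary instruction MI itself is never
      // reordered, and it is only reported to the anti-dep breaker via
      // Observe() below.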
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.enterRegion(MBB, I, Current, CurrentCount - Count);
        Scheduler.setEndIndex(CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.setEndIndex(CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills
    Scheduler.fixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak) {
    unsigned Broken =
        AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                            EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//
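
// Each node passes through three states during list scheduling: when its last
// strong predecessor is scheduled, ReleaseSucc moves it into PendingQueue;
// once CurCycle reaches its depth (i.e. its operands' latencies are
// satisfied), it moves to AvailableQueue; from there the priority queue
// selects it for issue, hazards permitting.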

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here, as follows:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node which causes
  // all descendants to be marked dirty. Setting the successor depth explicitly
  // here would cause depth to be recomputed for all its ancestors. If the
  // successor is not yet ready (because of a transitively redundant edge) then
  // this causes depth computation to be quadratic in the size of the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// emitNoop - Add a noop to the current instruction sequence.
void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
  DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
  HazardRec->EmitNoop();
  Sequence.push_back(nullptr);   // NULL here means noop
  ++NumNoops;
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
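  // CycleHasInsts tracks whether anything has issued in the current cycle;
  // when nothing can issue, it selects below between finishing a non-empty
  // cycle, stalling, or emitting a noop.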
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
          HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        if (HazardRec->ShouldPreferAnother(CurSUnit)) {
          if (!NotPreferredSUnit) {
            // If this is the first non-preferred node for this cycle, then
            // record it and continue searching for a preferred node. If this
            // is not the first non-preferred node, then treat it as though
            // there had been a hazard.
            NotPreferredSUnit = CurSUnit;
            continue;
          }
        } else {
          FoundSUnit = CurSUnit;
          break;
        }
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // If we have a non-preferred node, push it back onto the available list.
    // If we did not find a preferred node, then schedule this first
    // non-preferred node.
    if (NotPreferredSUnit) {
      if (!FoundSUnit) {
        DEBUG(dbgs() << "*** Will schedule a non-preferred instruction...\n");
        FoundSUnit = NotPreferredSUnit;
      } else {
        AvailableQueue.push(NotPreferredSUnit);
      }

      NotPreferredSUnit = nullptr;
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // If we need to emit noops prior to this instruction, then do so.
      unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
      for (unsigned i = 0; i != NumPreNoops; ++i)
        emitNoop(CurCycle);

      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        emitNoop(CurCycle);
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = std::prev(RegionEnd);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}