//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "post-RA-sched"

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

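// For example, this forces the pass on and selects the critical-path breaker
// (illustrative invocation only; these flags are cl::opts, so any tool that
// drives codegen, such as llc, accepts them):
//   llc -O2 -post-RA-scheduler -break-anti-dependencies=critical foo.ll
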
// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control for which MBBs are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control for which MBBs are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
class PostRAScheduler : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  RegisterClassInfo RegClassInfo;

public:
  static char ID;
  PostRAScheduler() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addRequired<MachineLoopInfo>();
    AU.addPreserved<MachineLoopInfo>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &Fn) override;

  bool enablePostRAScheduler(
      const TargetSubtargetInfo &ST, CodeGenOpt::Level OptLevel,
      TargetSubtargetInfo::AntiDepBreakMode &Mode,
      TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const;
};
char PostRAScheduler::ID = 0;

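/// SchedulePostRATDList - The top-down post-RA list scheduler. For each
/// scheduling region it builds the dependence graph, optionally lets an
/// anti-dependence breaker rename registers, and then list-schedules the
/// region, inserting noops where the hazard recognizer requires them.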
class SchedulePostRATDList : public ScheduleDAGInstrs {
  /// AvailableQueue - The priority queue to use for the available SUnits.
  ///
  LatencyPriorityQueue AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once its operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
  AntiDepBreaker *AntiDepBreak;

  /// AA - AliasAnalysis for making memory reference queries.
  AliasAnalysis *AA;

  /// The schedule. Null SUnit*'s represent noop instructions.
  std::vector<SUnit*> Sequence;

  /// The index in BB of RegionEnd.
  ///
  /// This is the instruction number from the top of the current block, not
  /// the SlotIndex. It is only used by the AntiDepBreaker.
  unsigned EndIndex;

public:
  SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
      AliasAnalysis *AA, const RegisterClassInfo&,
      TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);

  ~SchedulePostRATDList();

  /// startBlock - Initialize register live-range state for scheduling in
  /// this block.
  ///
  void startBlock(MachineBasicBlock *BB) override;

  // Set the index of RegionEnd within the current BB.
  void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }

  /// Initialize the scheduler state for the next scheduling region.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Notify that the scheduler has finished scheduling the current region.
  void exitRegion() override;

  /// schedule - Schedule the instruction range using list scheduling.
  ///
  void schedule() override;

  void EmitSchedule();

  /// Observe - Update liveness information to account for the current
  /// instruction, which will not be scheduled.
  ///
  void Observe(MachineInstr *MI, unsigned Count);

  /// finishBlock - Clean up register live-range state.
  ///
  void finishBlock() override;

private:
  void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
  void ListScheduleTopDown();

  void dumpSchedule() const;
  void emitNoop(unsigned CurCycle);
};
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
    MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
    AliasAnalysis *AA, const RegisterClassInfo &RCI,
    TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
    SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
    : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), AA(AA), EndIndex(0) {

  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins =
      TM.getSubtargetImpl()->getInstrItineraryData();
  HazardRec =
      TM.getSubtargetImpl()->getInstrInfo()->CreateTargetPostRAHazardRecognizer(
          InstrItins, this);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak =
      ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL)
           ? (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI,
                                                            CriticalPathRCs)
           : ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL)
                  ? (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI)
                  : nullptr));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned regioninstrs) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  DEBUG({
    dbgs() << "*** Final schedule ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

bool PostRAScheduler::enablePostRAScheduler(
    const TargetSubtargetInfo &ST,
    CodeGenOpt::Level OptLevel,
    TargetSubtargetInfo::AntiDepBreakMode &Mode,
    TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
  Mode = ST.getAntiDepBreakMode();
  ST.getCriticalPathRCs(CriticalPathRCs);
  return ST.enablePostRAScheduler() &&
         OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  if (skipOptnoneFunction(*Fn.getFunction()))
    return false;

  TII = Fn.getSubtarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
      TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST =
        Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!enablePostRAScheduler(ST, PassConfig->getOptLevel(),
                               AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for an anti-dependence breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
                      ? TargetSubtargetInfo::ANTIDEP_ALL
                      : ((EnableAntiDepBreaking == "critical")
                             ? TargetSubtargetInfo::ANTIDEP_CRITICAL
                             : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = std::prev(I);
      --Count;
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
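      // For example (illustration only): a block "i0; i1; call; i2; i3" is
      // split into two regions; [i2,i3] is scheduled first, then [i0,i1].
      // The call itself is never reordered; it is only passed to Observe()
      // so the anti-dependence breaker can account for its register effects.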
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.enterRegion(MBB, I, Current, CurrentCount - Count);
        Scheduler.setEndIndex(CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.setEndIndex(CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills.
    Scheduler.fixupKills(MBB);
  }

  return true;
}

/// startBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak)
    AntiDepBreak->StartBlock(BB);
}

/// schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak) {
    unsigned Broken =
        AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                            EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
            SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// finishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as follows:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge) then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// emitNoop - Add a noop to the current instruction sequence.
void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
  DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
  HazardRec->EmitNoop();
  Sequence.push_back(nullptr);   // NULL here means noop
  ++NumNoops;
}

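// In sketch form (simplified; hazard preferences and the issue limit are
// handled in the real loop below), one cycle of the scheduler looks like:
//
//   move each pending node with depth <= CurCycle to the available queue;
//   pop available nodes until one has no hazard at the current cycle;
//   if such a node exists, schedule it (after any required pre-noops);
//   otherwise advance the cycle, counting a stall or emitting a noop.
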
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
          HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        if (HazardRec->ShouldPreferAnother(CurSUnit)) {
          if (!NotPreferredSUnit) {
            // If this is the first non-preferred node for this cycle, then
            // record it and continue searching for a preferred node. If this
            // is not the first non-preferred node, then treat it as though
            // there had been a hazard.
            NotPreferredSUnit = CurSUnit;
            continue;
          }
        } else {
          FoundSUnit = CurSUnit;
          break;
        }
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // If we have a non-preferred node, push it back onto the available list.
    // If we did not find a preferred node, then schedule this first
    // non-preferred node.
    if (NotPreferredSUnit) {
      if (!FoundSUnit) {
        DEBUG(dbgs() << "*** Will schedule a non-preferred instruction...\n");
        FoundSUnit = NotPreferredSUnit;
      } else {
        AvailableQueue.push(NotPreferredSUnit);
      }

      NotPreferredSUnit = nullptr;
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // If we need to emit noops prior to this instruction, then do so.
      unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
      for (unsigned i = 0; i != NumPreNoops; ++i)
        emitNoop(CurCycle);

      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        emitNoop(CurCycle);
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

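// List scheduling itself only records an order in Sequence; EmitSchedule then
// splices the region's instructions (and any required noops) into that order.
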
// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = std::prev(RegionEnd);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}