//===-- SIMachineScheduler.cpp - SI Scheduler Interface -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Machine Scheduler interface
//
//===----------------------------------------------------------------------===//

#include "SIMachineScheduler.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/RegisterPressure.h"

using namespace llvm;

#define DEBUG_TYPE "misched"

// This scheduler implements a different scheduling algorithm than
// GenericScheduler.
//
// There are several architecture-specific behaviours that can't be modelled
// for GenericScheduler:
// . When accessing the result of an SGPR load instruction, you have to wait
//   for all the SGPR load instructions before your current instruction to
//   have finished.
// . When accessing the result of a VGPR load instruction, you have to wait
//   for all the VGPR load instructions previous to the VGPR load instruction
//   you are interested in to finish.
// . The lower the register pressure, the better load latencies are hidden.
//
// Moreover, some specificities (like the fact that a lot of instructions in
// the shader have few dependencies) make the generic scheduler have some
// unpredictable behaviours. For example when register pressure becomes high,
// it can either manage to prevent register pressure from going too high, or
// it can increase register pressure even more than if it hadn't taken
// register pressure into account.
//
// Also some other bad behaviours are generated, like loading at the beginning
// of the shader a constant in VGPR you won't need until the end of the shader.
//
// The scheduling problem for SI can distinguish three main parts:
// . Hiding high latencies (texture sampling, etc)
// . Hiding low latencies (SGPR constant loading, etc)
// . Keeping register usage low for better latency hiding and general
//   performance
//
// Some other things can also affect performance, but are hard to predict
// (cache usage, the fact the HW can issue several instructions from different
// wavefronts if of different types, etc)
//
// This scheduler tries to solve the scheduling problem by dividing it into
// simpler sub-problems. It divides the instructions into blocks, schedules
// locally inside the blocks where it takes care of low latencies, and then
// chooses the order of the blocks by taking care of high latencies.
// Dividing the instructions into blocks helps keep register usage low.
//
// First the instructions are put into blocks.
// We want the blocks to help control register usage and hide high latencies
// later. To help control register usage, we typically want all local
// computations, when for example you create a result that can be consumed
// right away, to be contained in a block. Block inputs and outputs would
// typically be important results that are needed in several locations of
// the shader.
// Since we do want blocks to help hide high latencies, we want
// the instructions inside the block to have a minimal set of dependencies
// on high latencies. That makes it easy to pick blocks to hide specific
// high latencies.
// The block creation algorithm is divided into several steps, and several
// variants can be tried during the scheduling process.
//
// Second the order of the instructions inside the blocks is chosen.
// At that step we take into account only register usage and hiding
// low latency instructions.
//
// Third, the block order is chosen; there we try to hide high latencies
// and keep register usage low.
//
// After the third step, a pass is done to improve the hiding of low
// latencies.
//
// Actually when talking about 'low latency' or 'high latency' it includes
// both the latency for the cache (or global memory) data to reach the
// register, and the bandwidth limitations.
// Increasing the number of active wavefronts helps hide the former, but it
// doesn't solve the latter, which is why, even if the wavefront count is
// high, we have to try to have as many instructions hiding high latencies
// as possible.
// The OpenCL doc gives for example a latency of 400 cycles for a global
// memory access, which is hidden by 10 instructions if the wavefront count
// is 10 (a quick sanity check of this figure is sketched at the end of
// this comment).

// Some figures taken from AMD docs:
// Both texture and constant L1 caches are 4-way associative with 64 byte
// lines.
// Constant cache is shared by 4 CUs.
// For texture sampling, the address generation unit receives 4 texture
// addresses per cycle, thus we could expect texture sampling latency to be
// equivalent to 4 instructions in the very best case (a VGPR is 64 work
// items, instructions in a wavefront group are executed every 4 cycles),
// or 16 instructions if the other wavefronts associated with the 3 other
// VALUs of the CU do texture sampling too. (Don't take these figures too
// seriously, as I'm not 100% sure of the computation.)
// Data exports should get similar latency.
// For constant loading, the cache is shared by 4 CUs.
// The doc says "a throughput of 16B/cycle for each of the 4 Compute Unit".
// I guess if the other CUs don't read the cache, it can go up to 64B/cycle.
// It means a simple s_buffer_load should take one instruction to hide, as
// well as an s_buffer_loadx2 and potentially an s_buffer_loadx8 if on the
// same cache line.
//
// As of today the driver doesn't preload the constants in cache, thus the
// first loads get extra latency. The doc says global memory access can be
// 300-600 cycles. We do not specifically take that into account when
// scheduling, as we expect the driver to be able to preload the constants
// soon.
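// A back-of-the-envelope check of the 400 cycle figure above (my own
// computation, to be taken with the same caution as the other figures):
// with one instruction issued every 4 cycles per wavefront, covering
// 400 cycles takes 400/4 = 100 issue slots; spread over 10 active
// wavefronts, that is ~10 independent instructions per wavefront, which
// matches the doc's example.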


// common code //

#ifndef NDEBUG

static const char *getReasonStr(SIScheduleCandReason Reason) {
  switch (Reason) {
  case NoCand: return "NOCAND";
  case RegUsage: return "REGUSAGE";
  case Latency: return "LATENCY";
  case Successor: return "SUCCESSOR";
  case Depth: return "DEPTH";
  case NodeOrder: return "ORDER";
  }
  llvm_unreachable("Unknown reason!");
}

#endif

// Prefer the candidate with the smaller (tryLess) or bigger (tryGreater)
// value. Returns true if the criterion was decisive (the values differ);
// on a tie, records the repeat and falls through to the next criterion.
static bool tryLess(int TryVal, int CandVal,
                    SISchedulerCandidate &TryCand,
                    SISchedulerCandidate &Cand,
                    SIScheduleCandReason Reason) {
  if (TryVal < CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal > CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}

static bool tryGreater(int TryVal, int CandVal,
                       SISchedulerCandidate &TryCand,
                       SISchedulerCandidate &Cand,
                       SIScheduleCandReason Reason) {
  if (TryVal > CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal < CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}

// SIScheduleBlock //

void SIScheduleBlock::addUnit(SUnit *SU) {
  NodeNum2Index[SU->NodeNum] = SUnits.size();
  SUnits.push_back(SU);
}

#ifndef NDEBUG

void SIScheduleBlock::traceCandidate(const SISchedCandidate &Cand) {

  dbgs() << "  SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  dbgs() << '\n';
}
#endif

void SIScheduleBlock::tryCandidateTopDown(SISchedCandidate &Cand,
                                          SISchedCandidate &TryCand) {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  if (Cand.SGPRUsage > 60 &&
      tryLess(TryCand.SGPRUsage, Cand.SGPRUsage, TryCand, Cand, RegUsage))
    return;

  // Schedule low latency instructions as early as possible.
  // Order of priority is:
  // . Low latency instructions which do not depend on other low latency
  //   instructions we haven't waited for
  // . Other instructions which do not depend on low latency instructions
  //   we haven't waited for
  // . Low latencies
  // . All other instructions
  // Goal is to get: low latency instructions - independent instructions
  //   - (possibly some more low latency instructions)
  //   - instructions that depend on the first low latency instructions.
  // If the block contains a lot of constant loads, the SGPR usage
  // could go quite high; the arbitrary limit of 60 above thus encourages
  // using the already loaded constants (in order to release some SGPRs)
  // before loading more.
  if (tryLess(TryCand.HasLowLatencyNonWaitedParent,
              Cand.HasLowLatencyNonWaitedParent,
              TryCand, Cand, SIScheduleCandReason::Depth))
    return;

  if (tryGreater(TryCand.IsLowLatency, Cand.IsLowLatency,
                 TryCand, Cand, SIScheduleCandReason::Depth))
    return;

  if (TryCand.IsLowLatency &&
      tryLess(TryCand.LowLatencyOffset, Cand.LowLatencyOffset,
              TryCand, Cand, SIScheduleCandReason::Depth))
    return;

  if (tryLess(TryCand.VGPRUsage, Cand.VGPRUsage, TryCand, Cand, RegUsage))
    return;

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum) {
    TryCand.Reason = NodeOrder;
  }
}

SUnit* SIScheduleBlock::pickNode() {
  SISchedCandidate TopCand;

  for (SUnit* SU : TopReadySUs) {
    SISchedCandidate TryCand;
    std::vector<unsigned> pressure;
    std::vector<unsigned> MaxPressure;
    // Predict register usage after this instruction.
    TryCand.SU = SU;
    TopRPTracker.getDownwardPressure(SU->getInstr(), pressure, MaxPressure);
    TryCand.SGPRUsage = pressure[DAG->getSGPRSetID()];
    TryCand.VGPRUsage = pressure[DAG->getVGPRSetID()];
    TryCand.IsLowLatency = DAG->IsLowLatencySU[SU->NodeNum];
    TryCand.LowLatencyOffset = DAG->LowLatencyOffset[SU->NodeNum];
    TryCand.HasLowLatencyNonWaitedParent =
      HasLowLatencyNonWaitedParent[NodeNum2Index[SU->NodeNum]];
    tryCandidateTopDown(TopCand, TryCand);
    if (TryCand.Reason != NoCand)
      TopCand.setBest(TryCand);
  }

  return TopCand.SU;
}


// Schedule something valid.
void SIScheduleBlock::fastSchedule() {
  TopReadySUs.clear();
  if (Scheduled)
    undoSchedule();

  for (SUnit* SU : SUnits) {
    if (!SU->NumPredsLeft)
      TopReadySUs.push_back(SU);
  }

  while (!TopReadySUs.empty()) {
    SUnit *SU = TopReadySUs[0];
    ScheduledSUnits.push_back(SU);
    nodeScheduled(SU);
  }

  Scheduled = true;
}

// Returns whether Reg was defined between First and Last.
static bool isDefBetween(unsigned Reg,
                         SlotIndex First, SlotIndex Last,
                         const MachineRegisterInfo *MRI,
                         const LiveIntervals *LIS) {
  for (MachineRegisterInfo::def_instr_iterator
       UI = MRI->def_instr_begin(Reg),
       UE = MRI->def_instr_end(); UI != UE; ++UI) {
    const MachineInstr* MI = &*UI;
    if (MI->isDebugValue())
      continue;
    SlotIndex InstSlot = LIS->getInstructionIndex(MI).getRegSlot();
    if (InstSlot >= First && InstSlot <= Last)
      return true;
  }
  return false;
}

void SIScheduleBlock::initRegPressure(MachineBasicBlock::iterator BeginBlock,
                                      MachineBasicBlock::iterator EndBlock) {
  IntervalPressure Pressure, BotPressure;
  RegPressureTracker RPTracker(Pressure), BotRPTracker(BotPressure);
  LiveIntervals *LIS = DAG->getLIS();
  MachineRegisterInfo *MRI = DAG->getMRI();
  DAG->initRPTracker(TopRPTracker);
  DAG->initRPTracker(BotRPTracker);
  DAG->initRPTracker(RPTracker);

  // Goes through all SUs. RPTracker captures what had to be alive for the
  // SUs to execute, and what is still alive at the end.
  for (SUnit* SU : ScheduledSUnits) {
    RPTracker.setPos(SU->getInstr());
    RPTracker.advance();
  }

  // Close the RPTracker to finalize live ins/outs.
  RPTracker.closeRegion();

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Do not track physical registers, because doing so here gives wrong
  // results.
  for (unsigned Reg : RPTracker.getPressure().LiveInRegs) {
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      LiveInRegs.insert(Reg);
  }
  LiveOutRegs.clear();
  // There are several possibilities to distinguish:
  // 1) Reg is not input to any instruction in the block, but is output of one
  // 2) 1) + read in the block and not needed after it
  // 3) 1) + read in the block but needed in another block
  // 4) Reg is input of an instruction but another block will read it too
  // 5) Reg is input of an instruction and then rewritten in the block.
  //    result is not read in the block (implies used in another block)
  // 6) Reg is input of an instruction and then rewritten in the block.
  //    result is read in the block and not needed in another block
  // 7) Reg is input of an instruction and then rewritten in the block.
  //    result is read in the block but also needed in another block
  // LiveInRegs will contain all the regs in situations 4, 5, 6, 7.
  // We want LiveOutRegs to contain only the regs whose content will be read
  // after in another block, and whose content was written in the current
  // block, that is we want it to get 1, 3, 5, 7.
  // Since we made the MIs of a block be packed all together before
  // scheduling, the LiveIntervals were correct, and the RPTracker was
  // able to correctly handle 5 vs 6, and 2 vs 3.
  // (Note: This is not sufficient for the RPTracker to avoid mistakes for
  // case 4.)
  // The RPTracker's LiveOutRegs has 1, 3, (some correct or incorrect) 4, 5, 7.
  // Comparing to LiveInRegs is not sufficient to differentiate 4 vs 5, 7.
  // The use of isDefBetween removes case 4.
  for (unsigned Reg : RPTracker.getPressure().LiveOutRegs) {
    if (TargetRegisterInfo::isVirtualRegister(Reg) &&
        isDefBetween(Reg, LIS->getInstructionIndex(BeginBlock).getRegSlot(),
                     LIS->getInstructionIndex(EndBlock).getRegSlot(),
                     MRI, LIS)) {
      LiveOutRegs.insert(Reg);
    }
  }

  // Pressure = sum over alive registers of the register size.
  // Internally LLVM will represent some registers as one big 128 bit
  // register, for example, even though it actually corresponds to 4 actual
  // 32 bit registers. Thus Pressure is not equal to
  // num_alive_registers * constant.
  LiveInPressure = TopPressure.MaxSetPressure;
  LiveOutPressure = BotPressure.MaxSetPressure;

  // Prepares TopRPTracker for top-down scheduling.
  TopRPTracker.closeTop();
}

void SIScheduleBlock::schedule(MachineBasicBlock::iterator BeginBlock,
                               MachineBasicBlock::iterator EndBlock) {
  if (!Scheduled)
    fastSchedule();

  // PreScheduling phase to set LiveIn and LiveOut.
  initRegPressure(BeginBlock, EndBlock);
  undoSchedule();

  // Schedule for real now.

  TopReadySUs.clear();

  for (SUnit* SU : SUnits) {
    if (!SU->NumPredsLeft)
      TopReadySUs.push_back(SU);
  }

  while (!TopReadySUs.empty()) {
    SUnit *SU = pickNode();
    ScheduledSUnits.push_back(SU);
    TopRPTracker.setPos(SU->getInstr());
    TopRPTracker.advance();
    nodeScheduled(SU);
  }

  // TODO: compute InternalAdditionnalPressure.
  InternalAdditionnalPressure.resize(TopPressure.MaxSetPressure.size());

  // Check everything is right.
#ifndef NDEBUG
  assert(SUnits.size() == ScheduledSUnits.size() &&
         TopReadySUs.empty());
  for (SUnit* SU : SUnits) {
    assert(SU->isScheduled &&
           SU->NumPredsLeft == 0);
  }
#endif

  Scheduled = true;
}

void SIScheduleBlock::undoSchedule() {
  for (SUnit* SU : SUnits) {
    SU->isScheduled = false;
    for (SDep& Succ : SU->Succs) {
      if (BC->isSUInBlock(Succ.getSUnit(), ID))
        undoReleaseSucc(SU, &Succ);
    }
  }
  HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);
  ScheduledSUnits.clear();
  Scheduled = false;
}

void SIScheduleBlock::undoReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    ++SuccSU->WeakPredsLeft;
    return;
  }
  ++SuccSU->NumPredsLeft;
}

void SIScheduleBlock::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(DAG);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif

  --SuccSU->NumPredsLeft;
}

/// Release the successors of SU that are inside the block
/// (InOrOutBlock = true) or outside of it (InOrOutBlock = false).
void SIScheduleBlock::releaseSuccessors(SUnit *SU, bool InOrOutBlock) {
  for (SDep& Succ : SU->Succs) {
    SUnit *SuccSU = Succ.getSUnit();

    if (BC->isSUInBlock(SuccSU, ID) != InOrOutBlock)
      continue;

    releaseSucc(SU, &Succ);
    if (SuccSU->NumPredsLeft == 0 && InOrOutBlock)
      TopReadySUs.push_back(SuccSU);
  }
}

void SIScheduleBlock::nodeScheduled(SUnit *SU) {
  // Is in TopReadySUs
  assert(!SU->NumPredsLeft);
  std::vector<SUnit*>::iterator I =
    std::find(TopReadySUs.begin(), TopReadySUs.end(), SU);
  if (I == TopReadySUs.end()) {
    dbgs() << "Data Structure Bug in SI Scheduler\n";
    llvm_unreachable(nullptr);
  }
  TopReadySUs.erase(I);

  releaseSuccessors(SU, true);
  // Scheduling this node will trigger a wait,
  // thus propagate to other instructions that they do not need to wait
  // either.
  if (HasLowLatencyNonWaitedParent[NodeNum2Index[SU->NodeNum]])
    HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);

  if (DAG->IsLowLatencySU[SU->NodeNum]) {
    for (SDep& Succ : SU->Succs) {
      std::map<unsigned, unsigned>::iterator I =
        NodeNum2Index.find(Succ.getSUnit()->NodeNum);
      if (I != NodeNum2Index.end())
        HasLowLatencyNonWaitedParent[I->second] = 1;
    }
  }
  SU->isScheduled = true;
}

void SIScheduleBlock::finalizeUnits() {
  // We remove links from outside blocks to enable scheduling inside the
  // block.
  for (SUnit* SU : SUnits) {
    releaseSuccessors(SU, false);
    if (DAG->IsHighLatencySU[SU->NodeNum])
      HighLatencyBlock = true;
  }
  HasLowLatencyNonWaitedParent.resize(SUnits.size(), 0);
}

// We maintain ascending order of IDs.
void SIScheduleBlock::addPred(SIScheduleBlock *Pred) {
  unsigned PredID = Pred->getID();

  // Check if Pred is not already a predecessor.
  for (SIScheduleBlock* P : Preds) {
    if (PredID == P->getID())
      return;
  }
  Preds.push_back(Pred);

#ifndef NDEBUG
  for (SIScheduleBlock* S : Succs) {
    if (PredID == S->getID())
      assert(!"Loop in the Block Graph!\n");
  }
#endif
}

void SIScheduleBlock::addSucc(SIScheduleBlock *Succ) {
  unsigned SuccID = Succ->getID();

  // Check if Succ is not already a successor.
  for (SIScheduleBlock* S : Succs) {
    if (SuccID == S->getID())
      return;
  }
  if (Succ->isHighLatencyBlock())
    ++NumHighLatencySuccessors;
  Succs.push_back(Succ);
#ifndef NDEBUG
  for (SIScheduleBlock* P : Preds) {
    if (SuccID == P->getID())
      assert(!"Loop in the Block Graph!\n");
  }
#endif
}

#ifndef NDEBUG
void SIScheduleBlock::printDebug(bool full) {
  dbgs() << "Block (" << ID << ")\n";
  if (!full)
    return;

  dbgs() << "\nContains High Latency Instruction: "
         << HighLatencyBlock << '\n';
  dbgs() << "\nDepends On:\n";
  for (SIScheduleBlock* P : Preds) {
    P->printDebug(false);
  }

  dbgs() << "\nSuccessors:\n";
  for (SIScheduleBlock* S : Succs) {
    S->printDebug(false);
  }

  if (Scheduled) {
    dbgs() << "LiveInPressure " << LiveInPressure[DAG->getSGPRSetID()] << ' '
           << LiveInPressure[DAG->getVGPRSetID()] << '\n';
    dbgs() << "LiveOutPressure " << LiveOutPressure[DAG->getSGPRSetID()] << ' '
           << LiveOutPressure[DAG->getVGPRSetID()] << "\n\n";
    dbgs() << "LiveIns:\n";
    for (unsigned Reg : LiveInRegs)
      dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';

    dbgs() << "\nLiveOuts:\n";
    for (unsigned Reg : LiveOutRegs)
      dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';
  }

  dbgs() << "\nInstructions:\n";
  for (SUnit* SU : SUnits) {
    SU->dump(DAG);
  }

  dbgs() << "///////////////////////\n";
}

#endif

// SIScheduleBlockCreator //

SIScheduleBlockCreator::SIScheduleBlockCreator(SIScheduleDAGMI *DAG) :
DAG(DAG) {
}

SIScheduleBlockCreator::~SIScheduleBlockCreator() {
}

SIScheduleBlocks
SIScheduleBlockCreator::getBlocks(SISchedulerBlockCreatorVariant BlockVariant) {
  std::map<SISchedulerBlockCreatorVariant, SIScheduleBlocks>::iterator B =
    Blocks.find(BlockVariant);
  if (B == Blocks.end()) {
    SIScheduleBlocks Res;
    createBlocksForVariant(BlockVariant);
    topologicalSort();
    scheduleInsideBlocks();
    fillStats();
    Res.Blocks = CurrentBlocks;
    Res.TopDownIndex2Block = TopDownIndex2Block;
    Res.TopDownBlock2Index = TopDownBlock2Index;
    Blocks[BlockVariant] = Res;
    return Res;
  } else {
    return B->second;
  }
}

bool SIScheduleBlockCreator::isSUInBlock(SUnit *SU, unsigned ID) {
  if (SU->NodeNum >= DAG->SUnits.size())
    return false;
  return CurrentBlocks[Node2CurrentBlock[SU->NodeNum]]->getID() == ID;
}

void SIScheduleBlockCreator::colorHighLatenciesAlone() {
  unsigned DAGSize = DAG->SUnits.size();

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    if (DAG->IsHighLatencySU[SU->NodeNum]) {
      CurrentColoring[SU->NodeNum] = NextReservedID++;
    }
  }
}

void SIScheduleBlockCreator::colorHighLatenciesGroups() {
  unsigned DAGSize = DAG->SUnits.size();
  unsigned NumHighLatencies = 0;
  unsigned GroupSize;
  unsigned Color = NextReservedID;
  unsigned Count = 0;
  std::set<unsigned> FormingGroup;

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    if (DAG->IsHighLatencySU[SU->NodeNum])
      ++NumHighLatencies;
  }

  if (NumHighLatencies == 0)
    return;

  if (NumHighLatencies <= 6)
    GroupSize = 2;
  else if (NumHighLatencies <= 12)
    GroupSize = 3;
  else
    GroupSize = 4;

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    if (DAG->IsHighLatencySU[SU->NodeNum]) {
      bool CompatibleGroup = true;
      unsigned ProposedColor = Color;
      for (unsigned j : FormingGroup) {
        // TODO: Currently CompatibleGroup will always be false,
        // because the graph enforces the load order. This
        // can be fixed, but as keeping the load order is often
        // good for performance that causes a performance hit (both
        // the default scheduler and this scheduler).
        // When this scheduler determines a good load order,
        // this can be fixed.
        if (!DAG->canAddEdge(SU, &DAG->SUnits[j]) ||
            !DAG->canAddEdge(&DAG->SUnits[j], SU))
          CompatibleGroup = false;
      }
      if (!CompatibleGroup || ++Count == GroupSize) {
        FormingGroup.clear();
        Color = ++NextReservedID;
        if (!CompatibleGroup) {
          ProposedColor = Color;
          FormingGroup.insert(SU->NodeNum);
        }
        Count = 0;
      } else {
        FormingGroup.insert(SU->NodeNum);
      }
      CurrentColoring[SU->NodeNum] = ProposedColor;
    }
  }
}

void SIScheduleBlockCreator::colorComputeReservedDependencies() {
  unsigned DAGSize = DAG->SUnits.size();
  std::map<std::set<unsigned>, unsigned> ColorCombinations;

  CurrentTopDownReservedDependencyColoring.clear();
  CurrentBottomUpReservedDependencyColoring.clear();

  CurrentTopDownReservedDependencyColoring.resize(DAGSize, 0);
  CurrentBottomUpReservedDependencyColoring.resize(DAGSize, 0);

  // Traverse TopDown, and give different colors to SUs depending
  // on which combination of High Latencies they depend on.

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[DAG->TopDownIndex2SU[i]];
    std::set<unsigned> SUColors;

    // Already given.
    if (CurrentColoring[SU->NodeNum]) {
      CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
        CurrentColoring[SU->NodeNum];
      continue;
    }

    for (SDep& PredDep : SU->Preds) {
      SUnit *Pred = PredDep.getSUnit();
      if (PredDep.isWeak() || Pred->NodeNum >= DAGSize)
        continue;
      if (CurrentTopDownReservedDependencyColoring[Pred->NodeNum] > 0)
        SUColors.insert(CurrentTopDownReservedDependencyColoring[Pred->NodeNum]);
    }
    // Color 0 by default.
    if (SUColors.empty())
      continue;
    // Same color as its parents.
    if (SUColors.size() == 1 && *SUColors.begin() > DAGSize)
      CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
        *SUColors.begin();
    else {
      std::map<std::set<unsigned>, unsigned>::iterator Pos =
        ColorCombinations.find(SUColors);
      if (Pos != ColorCombinations.end()) {
        CurrentTopDownReservedDependencyColoring[SU->NodeNum] = Pos->second;
      } else {
        CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
          NextNonReservedID;
        ColorCombinations[SUColors] = NextNonReservedID++;
      }
    }
  }

  ColorCombinations.clear();

  // Same as before, but BottomUp.

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
    std::set<unsigned> SUColors;

    // Already given.
    if (CurrentColoring[SU->NodeNum]) {
      CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
        CurrentColoring[SU->NodeNum];
      continue;
    }

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      if (CurrentBottomUpReservedDependencyColoring[Succ->NodeNum] > 0)
        SUColors.insert(CurrentBottomUpReservedDependencyColoring[Succ->NodeNum]);
    }
    // Keep color 0.
    if (SUColors.empty())
      continue;
    // Same color as its children.
    if (SUColors.size() == 1 && *SUColors.begin() > DAGSize)
      CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
        *SUColors.begin();
    else {
      std::map<std::set<unsigned>, unsigned>::iterator Pos =
        ColorCombinations.find(SUColors);
      if (Pos != ColorCombinations.end()) {
        CurrentBottomUpReservedDependencyColoring[SU->NodeNum] = Pos->second;
      } else {
        CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
          NextNonReservedID;
        ColorCombinations[SUColors] = NextNonReservedID++;
      }
    }
  }
}

void SIScheduleBlockCreator::colorAccordingToReservedDependencies() {
  unsigned DAGSize = DAG->SUnits.size();
  std::map<std::pair<unsigned, unsigned>, unsigned> ColorCombinations;

  // Every combination of colors given by the top down
  // and bottom up Reserved node dependency colorings
  // gets its own color.

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    std::pair<unsigned, unsigned> SUColors;

    // High latency instructions: already given.
    if (CurrentColoring[SU->NodeNum])
      continue;

    SUColors.first = CurrentTopDownReservedDependencyColoring[SU->NodeNum];
    SUColors.second = CurrentBottomUpReservedDependencyColoring[SU->NodeNum];

    std::map<std::pair<unsigned, unsigned>, unsigned>::iterator Pos =
      ColorCombinations.find(SUColors);
    if (Pos != ColorCombinations.end()) {
      CurrentColoring[SU->NodeNum] = Pos->second;
    } else {
      CurrentColoring[SU->NodeNum] = NextNonReservedID;
      ColorCombinations[SUColors] = NextNonReservedID++;
    }
  }
}

void SIScheduleBlockCreator::colorEndsAccordingToDependencies() {
  unsigned DAGSize = DAG->SUnits.size();
  std::vector<int> PendingColoring = CurrentColoring;

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
    std::set<unsigned> SUColors;
    std::set<unsigned> SUColorsPending;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    if (CurrentBottomUpReservedDependencyColoring[SU->NodeNum] > 0 ||
        CurrentTopDownReservedDependencyColoring[SU->NodeNum] > 0)
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      if (CurrentBottomUpReservedDependencyColoring[Succ->NodeNum] > 0 ||
          CurrentTopDownReservedDependencyColoring[Succ->NodeNum] > 0)
        SUColors.insert(CurrentColoring[Succ->NodeNum]);
      SUColorsPending.insert(PendingColoring[Succ->NodeNum]);
    }
    if (SUColors.size() == 1 && SUColorsPending.size() == 1)
      PendingColoring[SU->NodeNum] = *SUColors.begin();
    else // TODO: Attribute new colors depending on the color
         // combination of children.
      PendingColoring[SU->NodeNum] = NextNonReservedID++;
  }
  CurrentColoring = PendingColoring;
}


void SIScheduleBlockCreator::colorForceConsecutiveOrderInGroup() {
  unsigned DAGSize = DAG->SUnits.size();
  unsigned PreviousColor;
  std::set<unsigned> SeenColors;

  if (DAGSize <= 1)
    return;

  PreviousColor = CurrentColoring[0];

  for (unsigned i = 1, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    unsigned CurrentColor = CurrentColoring[i];
    unsigned PreviousColorSave = PreviousColor;
    assert(i == SU->NodeNum);

    if (CurrentColor != PreviousColor)
      SeenColors.insert(PreviousColor);
    PreviousColor = CurrentColor;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    if (SeenColors.find(CurrentColor) == SeenColors.end())
      continue;

    if (PreviousColorSave != CurrentColor)
      CurrentColoring[i] = NextNonReservedID++;
    else
      CurrentColoring[i] = CurrentColoring[i-1];
  }
}

void SIScheduleBlockCreator::colorMergeConstantLoadsNextGroup() {
  unsigned DAGSize = DAG->SUnits.size();

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
    std::set<unsigned> SUColors;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    // No predecessor: VGPR constant loading.
    // Low latency instructions usually have a predecessor (the address).
    if (SU->Preds.size() > 0 && !DAG->IsLowLatencySU[SU->NodeNum])
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      SUColors.insert(CurrentColoring[Succ->NodeNum]);
    }
    if (SUColors.size() == 1)
      CurrentColoring[SU->NodeNum] = *SUColors.begin();
  }
}

void SIScheduleBlockCreator::colorMergeIfPossibleNextGroup() {
  unsigned DAGSize = DAG->SUnits.size();

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
    std::set<unsigned> SUColors;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      SUColors.insert(CurrentColoring[Succ->NodeNum]);
    }
    if (SUColors.size() == 1)
      CurrentColoring[SU->NodeNum] = *SUColors.begin();
  }
}

void SIScheduleBlockCreator::colorMergeIfPossibleNextGroupOnlyForReserved() {
  unsigned DAGSize = DAG->SUnits.size();

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
    std::set<unsigned> SUColors;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      SUColors.insert(CurrentColoring[Succ->NodeNum]);
    }
    if (SUColors.size() == 1 && *SUColors.begin() <= DAGSize)
      CurrentColoring[SU->NodeNum] = *SUColors.begin();
  }
}

void SIScheduleBlockCreator::colorMergeIfPossibleSmallGroupsToNextGroup() {
  unsigned DAGSize = DAG->SUnits.size();
  std::map<unsigned, unsigned> ColorCount;

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
    unsigned color = CurrentColoring[SU->NodeNum];
    std::map<unsigned, unsigned>::iterator
      Pos = ColorCount.find(color);
    if (Pos != ColorCount.end()) {
      ++ColorCount[color];
    } else {
      ColorCount[color] = 1;
    }
  }

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
    unsigned color = CurrentColoring[SU->NodeNum];
    std::set<unsigned> SUColors;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    if (ColorCount[color] > 1)
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      SUColors.insert(CurrentColoring[Succ->NodeNum]);
    }
    if (SUColors.size() == 1 && *SUColors.begin() != color) {
      --ColorCount[color];
      CurrentColoring[SU->NodeNum] = *SUColors.begin();
      ++ColorCount[*SUColors.begin()];
    }
  }
}

void SIScheduleBlockCreator::cutHugeBlocks() {
  // TODO
}

void SIScheduleBlockCreator::regroupNoUserInstructions() {
  unsigned DAGSize = DAG->SUnits.size();
  int GroupID = NextNonReservedID++;

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
    bool hasSuccessor = false;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      hasSuccessor = true;
    }
    if (!hasSuccessor)
      CurrentColoring[SU->NodeNum] = GroupID;
  }
}

void SIScheduleBlockCreator::createBlocksForVariant(SISchedulerBlockCreatorVariant BlockVariant) {
  unsigned DAGSize = DAG->SUnits.size();
  std::map<unsigned,unsigned> RealID;

  CurrentBlocks.clear();
  CurrentColoring.clear();
  CurrentColoring.resize(DAGSize, 0);
  Node2CurrentBlock.clear();

  // Restore links previous scheduling variant has overridden.
  DAG->restoreSULinksLeft();

  NextReservedID = 1;
  NextNonReservedID = DAGSize + 1;

  DEBUG(dbgs() << "Coloring the graph\n");

  if (BlockVariant == SISchedulerBlockCreatorVariant::LatenciesGrouped)
    colorHighLatenciesGroups();
  else
    colorHighLatenciesAlone();
  colorComputeReservedDependencies();
  colorAccordingToReservedDependencies();
  colorEndsAccordingToDependencies();
  if (BlockVariant == SISchedulerBlockCreatorVariant::LatenciesAlonePlusConsecutive)
    colorForceConsecutiveOrderInGroup();
  regroupNoUserInstructions();
  colorMergeConstantLoadsNextGroup();
  colorMergeIfPossibleNextGroupOnlyForReserved();

  // Put SUs of the same color into the same block.
  Node2CurrentBlock.resize(DAGSize, -1);
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    unsigned Color = CurrentColoring[SU->NodeNum];
    if (RealID.find(Color) == RealID.end()) {
      int ID = CurrentBlocks.size();
      BlockPtrs.push_back(
        make_unique<SIScheduleBlock>(DAG, this, ID));
      CurrentBlocks.push_back(BlockPtrs.rbegin()->get());
      RealID[Color] = ID;
    }
    CurrentBlocks[RealID[Color]]->addUnit(SU);
    Node2CurrentBlock[SU->NodeNum] = RealID[Color];
  }

  // Build dependencies between blocks.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    int SUID = Node2CurrentBlock[i];
    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      if (Node2CurrentBlock[Succ->NodeNum] != SUID)
        CurrentBlocks[SUID]->addSucc(CurrentBlocks[Node2CurrentBlock[Succ->NodeNum]]);
    }
    for (SDep& PredDep : SU->Preds) {
      SUnit *Pred = PredDep.getSUnit();
      if (PredDep.isWeak() || Pred->NodeNum >= DAGSize)
        continue;
      if (Node2CurrentBlock[Pred->NodeNum] != SUID)
        CurrentBlocks[SUID]->addPred(CurrentBlocks[Node2CurrentBlock[Pred->NodeNum]]);
    }
  }

  // Free the roots and leaves of all blocks to enable scheduling inside
  // them.
  for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = CurrentBlocks[i];
    Block->finalizeUnits();
  }
  DEBUG(
    dbgs() << "Blocks created:\n\n";
    for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
      SIScheduleBlock *Block = CurrentBlocks[i];
      Block->printDebug(true);
    }
  );
}

// Two functions taken from Codegen/MachineScheduler.cpp

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

void SIScheduleBlockCreator::topologicalSort() {
  unsigned DAGSize = CurrentBlocks.size();
  std::vector<int> WorkList;

  DEBUG(dbgs() << "Topological Sort\n");

  WorkList.reserve(DAGSize);
  TopDownIndex2Block.resize(DAGSize);
  TopDownBlock2Index.resize(DAGSize);
  BottomUpIndex2Block.resize(DAGSize);

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SIScheduleBlock *Block = CurrentBlocks[i];
    unsigned Degree = Block->getSuccs().size();
    TopDownBlock2Index[i] = Degree;
    if (Degree == 0) {
      WorkList.push_back(i);
    }
  }

  int Id = DAGSize;
  while (!WorkList.empty()) {
    int i = WorkList.back();
    SIScheduleBlock *Block = CurrentBlocks[i];
    WorkList.pop_back();
    TopDownBlock2Index[i] = --Id;
    TopDownIndex2Block[Id] = i;
    for (SIScheduleBlock* Pred : Block->getPreds()) {
      if (!--TopDownBlock2Index[Pred->getID()])
        WorkList.push_back(Pred->getID());
    }
  }

#ifndef NDEBUG
  // Check correctness of the ordering.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SIScheduleBlock *Block = CurrentBlocks[i];
    for (SIScheduleBlock* Pred : Block->getPreds()) {
      assert(TopDownBlock2Index[i] > TopDownBlock2Index[Pred->getID()] &&
             "Wrong Top Down topological sorting");
    }
  }
#endif

  BottomUpIndex2Block = std::vector<int>(TopDownIndex2Block.rbegin(),
                                         TopDownIndex2Block.rend());
}

void SIScheduleBlockCreator::scheduleInsideBlocks() {
  unsigned DAGSize = CurrentBlocks.size();

  DEBUG(dbgs() << "\nScheduling Blocks\n\n");

  // First produce a valid scheduling, so that each block corresponds
  // to a contiguous range of instructions.
  DEBUG(dbgs() << "First phase: Fast scheduling for Reg Liveness\n");
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SIScheduleBlock *Block = CurrentBlocks[i];
    Block->fastSchedule();
  }

  // Note: the following code, and the part restoring previous position
  // is by far the most expensive operation of the Scheduler.

  // Do not update CurrentTop.
  MachineBasicBlock::iterator CurrentTopFastSched = DAG->getCurrentTop();
  std::vector<MachineBasicBlock::iterator> PosOld;
  std::vector<MachineBasicBlock::iterator> PosNew;
  PosOld.reserve(DAG->SUnits.size());
  PosNew.reserve(DAG->SUnits.size());

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    int BlockIndice = TopDownIndex2Block[i];
    SIScheduleBlock *Block = CurrentBlocks[BlockIndice];
    std::vector<SUnit*> SUs = Block->getScheduledUnits();

    for (SUnit* SU : SUs) {
      MachineInstr *MI = SU->getInstr();
      MachineBasicBlock::iterator Pos = MI;
      PosOld.push_back(Pos);
      if (&*CurrentTopFastSched == MI) {
        PosNew.push_back(Pos);
        CurrentTopFastSched = nextIfDebug(++CurrentTopFastSched,
                                          DAG->getCurrentBottom());
      } else {
        // Update the instruction stream.
        DAG->getBB()->splice(CurrentTopFastSched, DAG->getBB(), MI);

        // Update LiveIntervals.
        // Note: Moving all instructions and calling handleMove every time
        // is the most CPU intensive operation of the scheduler.
        // It would gain a lot if there was a way to recompute the
        // LiveIntervals for the entire scheduling region.
        DAG->getLIS()->handleMove(MI, /*UpdateFlags=*/true);
        PosNew.push_back(CurrentTopFastSched);
      }
    }
  }

  // Now we have Block of SUs == Block of MI.
  // We do the final schedule for the instructions inside the block.
  // The property that all the SUs of the Block are grouped together as MI
  // is used for correct reg usage tracking.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SIScheduleBlock *Block = CurrentBlocks[i];
    std::vector<SUnit*> SUs = Block->getScheduledUnits();
    Block->schedule((*SUs.begin())->getInstr(), (*SUs.rbegin())->getInstr());
  }

  DEBUG(dbgs() << "Restoring MI Pos\n");
  // Restore old ordering (which prevents a LIS->handleMove bug).
  for (unsigned i = PosOld.size(), e = 0; i != e; --i) {
    MachineBasicBlock::iterator POld = PosOld[i-1];
    MachineBasicBlock::iterator PNew = PosNew[i-1];
    if (PNew != POld) {
      // Update the instruction stream.
      DAG->getBB()->splice(POld, DAG->getBB(), PNew);

      // Update LiveIntervals.
      DAG->getLIS()->handleMove(POld, /*UpdateFlags=*/true);
    }
  }

  DEBUG(
    for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
      SIScheduleBlock *Block = CurrentBlocks[i];
      Block->printDebug(true);
    }
  );
}

void SIScheduleBlockCreator::fillStats() {
  unsigned DAGSize = CurrentBlocks.size();

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    int BlockIndice = TopDownIndex2Block[i];
    SIScheduleBlock *Block = CurrentBlocks[BlockIndice];
    if (Block->getPreds().size() == 0)
      Block->Depth = 0;
    else {
      unsigned Depth = 0;
      for (SIScheduleBlock *Pred : Block->getPreds()) {
        if (Depth < Pred->Depth + 1)
          Depth = Pred->Depth + 1;
      }
      Block->Depth = Depth;
    }
  }

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    int BlockIndice = BottomUpIndex2Block[i];
    SIScheduleBlock *Block = CurrentBlocks[BlockIndice];
    if (Block->getSuccs().size() == 0)
      Block->Height = 0;
    else {
      unsigned Height = 0;
      for (SIScheduleBlock *Succ : Block->getSuccs()) {
        if (Height < Succ->Height + 1)
          Height = Succ->Height + 1;
      }
      Block->Height = Height;
    }
  }
}

// SIScheduleBlockScheduler //

SIScheduleBlockScheduler::SIScheduleBlockScheduler(SIScheduleDAGMI *DAG,
                                                   SISchedulerBlockSchedulerVariant Variant,
                                                   SIScheduleBlocks BlocksStruct) :
  DAG(DAG), Variant(Variant), Blocks(BlocksStruct.Blocks),
  LastPosWaitedHighLatency(0), NumBlockScheduled(0), VregCurrentUsage(0),
  SregCurrentUsage(0), maxVregUsage(0), maxSregUsage(0) {

  // Fill the usage count of every output.
  // Warning: while by construction we always have a link between two blocks
  // when one needs a result from the other, the number of users of an output
  // is not simply the number of child blocks having the same virtual
  // register as an input.
  // Here is an example. A produces x and y. B eats x and produces x'.
  // C eats x' and y. The register coalescer may have attributed the same
  // virtual register to x and x'.
  // To count accurately, we do a topological sort. In case the register is
  // found for several parents, we increment the usage of the one with the
  // highest topological index.
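  // Put differently, LiveOutRegsNumUsages[PredID][Reg] will count how many
  // successor blocks consume Reg as an output of block PredID; a register
  // provided by several parents is credited to the parent with the highest
  // topological index, per the rule above.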
  LiveOutRegsNumUsages.resize(Blocks.size());
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = Blocks[i];
    for (unsigned Reg : Block->getInRegs()) {
      bool Found = false;
      int topoInd = -1;
      for (SIScheduleBlock* Pred : Block->getPreds()) {
        std::set<unsigned> PredOutRegs = Pred->getOutRegs();
        std::set<unsigned>::iterator RegPos = PredOutRegs.find(Reg);

        if (RegPos != PredOutRegs.end()) {
          Found = true;
          if (topoInd < BlocksStruct.TopDownBlock2Index[Pred->getID()]) {
            topoInd = BlocksStruct.TopDownBlock2Index[Pred->getID()];
          }
        }
      }

      if (!Found)
        continue;

      int PredID = BlocksStruct.TopDownIndex2Block[topoInd];
      std::map<unsigned, unsigned>::iterator RegPos =
        LiveOutRegsNumUsages[PredID].find(Reg);
      if (RegPos != LiveOutRegsNumUsages[PredID].end()) {
        ++LiveOutRegsNumUsages[PredID][Reg];
      } else {
        LiveOutRegsNumUsages[PredID][Reg] = 1;
      }
    }
  }

  LastPosHighLatencyParentScheduled.resize(Blocks.size(), 0);
  BlockNumPredsLeft.resize(Blocks.size());
  BlockNumSuccsLeft.resize(Blocks.size());

  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = Blocks[i];
    BlockNumPredsLeft[i] = Block->getPreds().size();
    BlockNumSuccsLeft[i] = Block->getSuccs().size();
  }

#ifndef NDEBUG
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = Blocks[i];
    assert(Block->getID() == i);
  }
#endif

  std::set<unsigned> InRegs = DAG->getInRegs();
  addLiveRegs(InRegs);

  // Fill LiveRegsConsumers for regs that were already
  // defined before scheduling.
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = Blocks[i];
    for (unsigned Reg : Block->getInRegs()) {
      bool Found = false;
      for (SIScheduleBlock* Pred : Block->getPreds()) {
        std::set<unsigned> PredOutRegs = Pred->getOutRegs();
        std::set<unsigned>::iterator RegPos = PredOutRegs.find(Reg);

        if (RegPos != PredOutRegs.end()) {
          Found = true;
          break;
        }
      }

      if (!Found) {
        if (LiveRegsConsumers.find(Reg) == LiveRegsConsumers.end())
          LiveRegsConsumers[Reg] = 1;
        else
          ++LiveRegsConsumers[Reg];
      }
    }
  }

  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = Blocks[i];
    if (BlockNumPredsLeft[i] == 0) {
      ReadyBlocks.push_back(Block);
    }
  }

  while (SIScheduleBlock *Block = pickBlock()) {
    BlocksScheduled.push_back(Block);
    blockScheduled(Block);
  }

  DEBUG(
    dbgs() << "Block Order:";
    for (SIScheduleBlock* Block : BlocksScheduled) {
      dbgs() << ' ' << Block->getID();
    }
  );
}

bool SIScheduleBlockScheduler::tryCandidateLatency(SIBlockSchedCandidate &Cand,
                                                   SIBlockSchedCandidate &TryCand) {
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  // Try to hide high latencies.
  if (tryLess(TryCand.LastPosHighLatParentScheduled,
              Cand.LastPosHighLatParentScheduled, TryCand, Cand, Latency))
    return true;
  // Schedule high latencies early so you can hide them better.
  if (tryGreater(TryCand.IsHighLatency, Cand.IsHighLatency,
                 TryCand, Cand, Latency))
    return true;
  if (TryCand.IsHighLatency && tryGreater(TryCand.Height, Cand.Height,
                                          TryCand, Cand, Depth))
    return true;
  if (tryGreater(TryCand.NumHighLatencySuccessors,
                 Cand.NumHighLatencySuccessors,
                 TryCand, Cand, Successor))
    return true;
  return false;
}

bool SIScheduleBlockScheduler::tryCandidateRegUsage(SIBlockSchedCandidate &Cand,
                                                    SIBlockSchedCandidate &TryCand) {
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  if (tryLess(TryCand.VGPRUsageDiff > 0, Cand.VGPRUsageDiff > 0,
              TryCand, Cand, RegUsage))
    return true;
  if (tryGreater(TryCand.NumSuccessors > 0,
                 Cand.NumSuccessors > 0,
                 TryCand, Cand, Successor))
    return true;
  if (tryGreater(TryCand.Height, Cand.Height, TryCand, Cand, Depth))
    return true;
  if (tryLess(TryCand.VGPRUsageDiff, Cand.VGPRUsageDiff,
              TryCand, Cand, RegUsage))
    return true;
  return false;
}

SIScheduleBlock *SIScheduleBlockScheduler::pickBlock() {
  SIBlockSchedCandidate Cand;
  std::vector<SIScheduleBlock*>::iterator Best;
  SIScheduleBlock *Block;
  if (ReadyBlocks.empty())
    return nullptr;

  DAG->fillVgprSgprCost(LiveRegs.begin(), LiveRegs.end(),
                        VregCurrentUsage, SregCurrentUsage);
  if (VregCurrentUsage > maxVregUsage)
    maxVregUsage = VregCurrentUsage;
  if (SregCurrentUsage > maxSregUsage)
    maxSregUsage = SregCurrentUsage;
  DEBUG(
    dbgs() << "Picking New Blocks\n";
    dbgs() << "Available: ";
    for (SIScheduleBlock* Block : ReadyBlocks)
      dbgs() << Block->getID() << ' ';
    dbgs() << "\nCurrent Live:\n";
    for (unsigned Reg : LiveRegs)
      dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';
    dbgs() << '\n';
    dbgs() << "Current VGPRs: " << VregCurrentUsage << '\n';
    dbgs() << "Current SGPRs: " << SregCurrentUsage << '\n';
  );

  Cand.Block = nullptr;
  for (std::vector<SIScheduleBlock*>::iterator I = ReadyBlocks.begin(),
       E = ReadyBlocks.end(); I != E; ++I) {
    SIBlockSchedCandidate TryCand;
    TryCand.Block = *I;
    TryCand.IsHighLatency = TryCand.Block->isHighLatencyBlock();
    TryCand.VGPRUsageDiff =
      checkRegUsageImpact(TryCand.Block->getInRegs(),
                          TryCand.Block->getOutRegs())[DAG->getVGPRSetID()];
    TryCand.NumSuccessors = TryCand.Block->getSuccs().size();
    TryCand.NumHighLatencySuccessors =
      TryCand.Block->getNumHighLatencySuccessors();
    TryCand.LastPosHighLatParentScheduled =
      (unsigned int) std::max<int> (0,
         LastPosHighLatencyParentScheduled[TryCand.Block->getID()] -
           LastPosWaitedHighLatency);
    TryCand.Height = TryCand.Block->Height;
    // Try not to increase VGPR usage too much, else we may spill.
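    // In other words: once VGPR usage is already high (above 120, a limit
    // as arbitrary as the 60 SGPR one earlier), or when the variant asks
    // for it, judge candidates on register usage first with latency as the
    // fallback (except for the pure BlockRegUsage variant); otherwise judge
    // on latency first with register usage as the fallback.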
    if (VregCurrentUsage > 120 ||
        Variant != SISchedulerBlockSchedulerVariant::BlockLatencyRegUsage) {
      if (!tryCandidateRegUsage(Cand, TryCand) &&
          Variant != SISchedulerBlockSchedulerVariant::BlockRegUsage)
        tryCandidateLatency(Cand, TryCand);
    } else {
      if (!tryCandidateLatency(Cand, TryCand))
        tryCandidateRegUsage(Cand, TryCand);
    }
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      Best = I;
      DEBUG(dbgs() << "Best Current Choice: " << Cand.Block->getID() << ' '
                   << getReasonStr(Cand.Reason) << '\n');
    }
  }

  DEBUG(
    dbgs() << "Picking: " << Cand.Block->getID() << '\n';
    dbgs() << "Is a block with high latency instruction: "
           << (Cand.IsHighLatency ? "yes\n" : "no\n");
    dbgs() << "Position of last high latency dependency: "
           << Cand.LastPosHighLatParentScheduled << '\n';
    dbgs() << "VGPRUsageDiff: " << Cand.VGPRUsageDiff << '\n';
    dbgs() << '\n';
  );

  Block = Cand.Block;
  ReadyBlocks.erase(Best);
  return Block;
}

// Tracking of currently alive registers to determine VGPR usage.

void SIScheduleBlockScheduler::addLiveRegs(std::set<unsigned> &Regs) {
  for (unsigned Reg : Regs) {
    // For now only track virtual registers.
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    // If not already in the live set, then add it.
    (void) LiveRegs.insert(Reg);
  }
}

void SIScheduleBlockScheduler::decreaseLiveRegs(SIScheduleBlock *Block,
                                                std::set<unsigned> &Regs) {
  for (unsigned Reg : Regs) {
    // For now only track virtual registers.
    std::set<unsigned>::iterator Pos = LiveRegs.find(Reg);
    assert(Pos != LiveRegs.end() && // Reg must be live.
           LiveRegsConsumers.find(Reg) != LiveRegsConsumers.end() &&
           LiveRegsConsumers[Reg] >= 1);
    --LiveRegsConsumers[Reg];
    if (LiveRegsConsumers[Reg] == 0)
      LiveRegs.erase(Pos);
  }
}

void SIScheduleBlockScheduler::releaseBlockSuccs(SIScheduleBlock *Parent) {
  for (SIScheduleBlock* Block : Parent->getSuccs()) {
    --BlockNumPredsLeft[Block->getID()];
    if (BlockNumPredsLeft[Block->getID()] == 0) {
      ReadyBlocks.push_back(Block);
    }
    // TODO: Improve check. When the dependencies between the high latency
    // instructions and the instructions of the other blocks are WAR or WAW,
    // no wait will be triggered. We would like these cases to not
    // update LastPosHighLatencyParentScheduled.
    if (Parent->isHighLatencyBlock())
      LastPosHighLatencyParentScheduled[Block->getID()] = NumBlockScheduled;
  }
}

void SIScheduleBlockScheduler::blockScheduled(SIScheduleBlock *Block) {
  decreaseLiveRegs(Block, Block->getInRegs());
  addLiveRegs(Block->getOutRegs());
  releaseBlockSuccs(Block);
  for (std::map<unsigned, unsigned>::iterator RegI =
       LiveOutRegsNumUsages[Block->getID()].begin(),
       E = LiveOutRegsNumUsages[Block->getID()].end(); RegI != E; ++RegI) {
    std::pair<unsigned, unsigned> RegP = *RegI;
    if (LiveRegsConsumers.find(RegP.first) == LiveRegsConsumers.end())
      LiveRegsConsumers[RegP.first] = RegP.second;
    else {
      assert(LiveRegsConsumers[RegP.first] == 0);
      LiveRegsConsumers[RegP.first] += RegP.second;
    }
  }
  if (LastPosHighLatencyParentScheduled[Block->getID()] >
        (unsigned)LastPosWaitedHighLatency)
    LastPosWaitedHighLatency =
      LastPosHighLatencyParentScheduled[Block->getID()];
  ++NumBlockScheduled;
}

std::vector<int>
SIScheduleBlockScheduler::checkRegUsageImpact(std::set<unsigned> &InRegs,
                                              std::set<unsigned> &OutRegs) {
  std::vector<int> DiffSetPressure;
  DiffSetPressure.assign(DAG->getTRI()->getNumRegPressureSets(), 0);

  for (unsigned Reg : InRegs) {
    // For now only track virtual registers.
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (LiveRegsConsumers[Reg] > 1)
      continue;
    PSetIterator PSetI = DAG->getMRI()->getPressureSets(Reg);
    for (; PSetI.isValid(); ++PSetI) {
      DiffSetPressure[*PSetI] -= PSetI.getWeight();
    }
  }

  for (unsigned Reg : OutRegs) {
    // For now only track virtual registers.
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    PSetIterator PSetI = DAG->getMRI()->getPressureSets(Reg);
    for (; PSetI.isValid(); ++PSetI) {
      DiffSetPressure[*PSetI] += PSetI.getWeight();
    }
  }

  return DiffSetPressure;
}

// SIScheduler //

struct SIScheduleBlockResult
SIScheduler::scheduleVariant(SISchedulerBlockCreatorVariant BlockVariant,
                             SISchedulerBlockSchedulerVariant ScheduleVariant) {
  SIScheduleBlocks Blocks = BlockCreator.getBlocks(BlockVariant);
  SIScheduleBlockScheduler Scheduler(DAG, ScheduleVariant, Blocks);
  std::vector<SIScheduleBlock*> ScheduledBlocks;
  struct SIScheduleBlockResult Res;

  ScheduledBlocks = Scheduler.getBlocks();

  for (unsigned b = 0; b < ScheduledBlocks.size(); ++b) {
    SIScheduleBlock *Block = ScheduledBlocks[b];
    std::vector<SUnit*> SUs = Block->getScheduledUnits();

    for (SUnit* SU : SUs)
      Res.SUs.push_back(SU->NodeNum);
  }

  Res.MaxSGPRUsage = Scheduler.getSGPRUsage();
  Res.MaxVGPRUsage = Scheduler.getVGPRUsage();
  return Res;
}

// SIScheduleDAGMI //

SIScheduleDAGMI::SIScheduleDAGMI(MachineSchedContext *C) :
  ScheduleDAGMILive(C, make_unique<GenericScheduler>(C)) {
  SITII = static_cast<const SIInstrInfo*>(TII);
  SITRI = static_cast<const SIRegisterInfo*>(TRI);

  VGPRSetID = SITRI->getVGPR32PressureSet();
  SGPRSetID = SITRI->getSGPR32PressureSet();
}

SIScheduleDAGMI::~SIScheduleDAGMI() {
}

ScheduleDAGInstrs *llvm::createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

// Code adapted from scheduleDAG.cpp
// Does a topological sort over the SUs.

// Code adapted from scheduleDAG.cpp.
// Builds both the TopDown and BottomUp topological orderings of the SUs.
void SIScheduleDAGMI::topologicalSort() {
  std::vector<int> TopDownSU2Index;
  unsigned DAGSize = SUnits.size();
  std::vector<SUnit*> WorkList;

  DEBUG(dbgs() << "Topological Sort\n");
  WorkList.reserve(DAGSize);

  TopDownIndex2SU.resize(DAGSize);
  TopDownSU2Index.resize(DAGSize);
  BottomUpIndex2SU.resize(DAGSize);

  WorkList.push_back(&getExitSU());
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    int NodeNum = SU->NodeNum;
    unsigned Degree = SU->Succs.size();
    TopDownSU2Index[NodeNum] = Degree;
    if (Degree == 0) {
      assert(SU->Succs.empty() && "SUnit should have no successors");
      WorkList.push_back(SU);
    }
  }

  int Id = DAGSize;
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    if (SU->NodeNum < DAGSize) {
      TopDownSU2Index[SU->NodeNum] = --Id;
      TopDownIndex2SU[Id] = SU->NodeNum;
    }
    for (SDep& Pred : SU->Preds) {
      SUnit *PredSU = Pred.getSUnit();
      if (PredSU->NodeNum < DAGSize && !--TopDownSU2Index[PredSU->NodeNum])
        WorkList.push_back(PredSU);
    }
  }

  BottomUpIndex2SU = std::vector<int>(TopDownIndex2SU.rbegin(),
                                      TopDownIndex2SU.rend());

#ifndef NDEBUG
  // Check correctness of the ordering.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    for (SDep& Pred : SU->Preds) {
      if (Pred.getSUnit()->NodeNum >= DAGSize)
        continue;
      assert(TopDownSU2Index[SU->NodeNum] >
             TopDownSU2Index[Pred.getSUnit()->NodeNum] &&
             "Wrong Top Down topological sorting");
    }
  }
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    for (SDep& Succ : SU->Succs) {
      if (Succ.getSUnit()->NodeNum >= DAGSize)
        continue;
      assert(TopDownSU2Index[SU->NodeNum] <
             TopDownSU2Index[Succ.getSUnit()->NodeNum] &&
             "Wrong Bottom Up topological sorting");
    }
  }
#endif
}
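
// Illustrative walk-through of topologicalSort() (a hypothetical 3-node
// DAG, not taken from real input): with edges SU0 -> SU2, SU1 -> SU2 and
// SU2 -> ExitSU, no SUnit starts with zero successors, so only ExitSU
// seeds the worklist. Popping ExitSU drops SU2's pending-successor count
// to zero; SU2 is popped next and receives the highest top-down index
// (2), which in turn releases SU0 and SU1 to take the remaining indices
// in LIFO order. BottomUpIndex2SU is simply that order reversed.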

// Move low latency instructions further from their users without
// increasing SGPR usage (in general).
// This is to be replaced by a better pass that would take SGPR usage
// into account (based on VGPR usage and the corresponding wavefront
// count), that would try to merge groups of loads when it makes sense,
// etc.
void SIScheduleDAGMI::moveLowLatencies() {
  unsigned DAGSize = SUnits.size();
  int LastLowLatencyUser = -1;
  int LastLowLatencyPos = -1;

  for (unsigned i = 0, e = ScheduledSUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[ScheduledSUnits[i]];
    bool IsLowLatencyUser = false;
    unsigned MinPos = 0;

    for (SDep& PredDep : SU->Preds) {
      SUnit *Pred = PredDep.getSUnit();
      if (SITII->isLowLatencyInstruction(Pred->getInstr())) {
        IsLowLatencyUser = true;
      }
      if (Pred->NodeNum >= DAGSize)
        continue;
      unsigned PredPos = ScheduledSUnitsInv[Pred->NodeNum];
      if (PredPos >= MinPos)
        MinPos = PredPos + 1;
    }

    if (SITII->isLowLatencyInstruction(SU->getInstr())) {
      unsigned BestPos = LastLowLatencyUser + 1;
      if ((int)BestPos <= LastLowLatencyPos)
        BestPos = LastLowLatencyPos + 1;
      if (BestPos < MinPos)
        BestPos = MinPos;
      if (BestPos < i) {
        for (unsigned u = i; u > BestPos; --u) {
          ++ScheduledSUnitsInv[ScheduledSUnits[u-1]];
          ScheduledSUnits[u] = ScheduledSUnits[u-1];
        }
        ScheduledSUnits[BestPos] = SU->NodeNum;
        ScheduledSUnitsInv[SU->NodeNum] = BestPos;
      }
      LastLowLatencyPos = BestPos;
      if (IsLowLatencyUser)
        LastLowLatencyUser = BestPos;
    } else if (IsLowLatencyUser) {
      LastLowLatencyUser = i;
    // Also move the COPY instructions that the low latency
    // instructions depend on.
    } else if (SU->getInstr()->getOpcode() == AMDGPU::COPY) {
      bool CopyForLowLat = false;
      for (SDep& SuccDep : SU->Succs) {
        SUnit *Succ = SuccDep.getSUnit();
        if (SITII->isLowLatencyInstruction(Succ->getInstr())) {
          CopyForLowLat = true;
        }
      }
      if (!CopyForLowLat)
        continue;
      if (MinPos < i) {
        for (unsigned u = i; u > MinPos; --u) {
          ++ScheduledSUnitsInv[ScheduledSUnits[u-1]];
          ScheduledSUnits[u] = ScheduledSUnits[u-1];
        }
        ScheduledSUnits[MinPos] = SU->NodeNum;
        ScheduledSUnitsInv[SU->NodeNum] = MinPos;
      }
    }
  }
}

void SIScheduleDAGMI::restoreSULinksLeft() {
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnits[i].isScheduled = false;
    SUnits[i].WeakPredsLeft = SUnitsLinksBackup[i].WeakPredsLeft;
    SUnits[i].NumPredsLeft = SUnitsLinksBackup[i].NumPredsLeft;
    SUnits[i].WeakSuccsLeft = SUnitsLinksBackup[i].WeakSuccsLeft;
    SUnits[i].NumSuccsLeft = SUnitsLinksBackup[i].NumSuccsLeft;
  }
}

// Return the VGPR and SGPR usage corresponding to some virtual registers.
template<typename Iterator> void
SIScheduleDAGMI::fillVgprSgprCost(Iterator First, Iterator End,
                                  unsigned &VgprUsage, unsigned &SgprUsage) {
  VgprUsage = 0;
  SgprUsage = 0;
  for (Iterator RegI = First; RegI != End; ++RegI) {
    unsigned Reg = *RegI;
    // For now only track virtual registers.
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    PSetIterator PSetI = MRI.getPressureSets(Reg);
    for (; PSetI.isValid(); ++PSetI) {
      if (*PSetI == VGPRSetID)
        VgprUsage += PSetI.getWeight();
      else if (*PSetI == SGPRSetID)
        SgprUsage += PSetI.getWeight();
    }
  }
}
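
// For instance (the weights here are illustrative; the real values come
// from the generated register pressure tables): a virtual register of a
// 64-bit VGPR class typically contributes weight 2 to the VGPR32 pressure
// set, so fillVgprSgprCost would add 2 to VgprUsage for it, while a
// single 32-bit SGPR adds 1 to SgprUsage.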

void SIScheduleDAGMI::schedule() {
  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  SIScheduleBlockResult Best, Temp;
  DEBUG(dbgs() << "Preparing Scheduling\n");

  buildDAGWithRegPressure();
  DEBUG(
    for (SUnit& SU : SUnits)
      SU.dumpAll(this)
  );

  Topo.InitDAGTopologicalSorting();
  topologicalSort();
  findRootsAndBiasEdges(TopRoots, BotRoots);
  // We reuse several ScheduleDAGMI and ScheduleDAGMILive
  // functions, but to make them happy we must initialize
  // the default Scheduler implementation (even if we do not
  // run it).
  SchedImpl->initialize(this);
  initQueues(TopRoots, BotRoots);

  // Fill some stats to help scheduling.

  SUnitsLinksBackup = SUnits;
  IsLowLatencySU.clear();
  LowLatencyOffset.clear();
  IsHighLatencySU.clear();

  IsLowLatencySU.resize(SUnits.size(), 0);
  LowLatencyOffset.resize(SUnits.size(), 0);
  IsHighLatencySU.resize(SUnits.size(), 0);

  for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    unsigned BaseLatReg, OffLatReg;
    if (SITII->isLowLatencyInstruction(SU->getInstr())) {
      IsLowLatencySU[i] = 1;
      if (SITII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseLatReg,
                                       OffLatReg, TRI))
        LowLatencyOffset[i] = OffLatReg;
    } else if (SITII->isHighLatencyInstruction(SU->getInstr()))
      IsHighLatencySU[i] = 1;
  }

  SIScheduler Scheduler(this);
  Best = Scheduler.scheduleVariant(
      SISchedulerBlockCreatorVariant::LatenciesAlone,
      SISchedulerBlockSchedulerVariant::BlockLatencyRegUsage);
#if 0 // To enable when the handleMove fix lands.
  // If VGPR usage is extremely high, try other well-performing variants
  // which could lead to lower VGPR usage.
  if (Best.MaxVGPRUsage > 180) {
    std::vector<std::pair<SISchedulerBlockCreatorVariant,
                          SISchedulerBlockSchedulerVariant>> Variants = {
      { LatenciesAlone, BlockRegUsageLatency },
//    { LatenciesAlone, BlockRegUsage },
      { LatenciesGrouped, BlockLatencyRegUsage },
//    { LatenciesGrouped, BlockRegUsageLatency },
//    { LatenciesGrouped, BlockRegUsage },
      { LatenciesAlonePlusConsecutive, BlockLatencyRegUsage },
//    { LatenciesAlonePlusConsecutive, BlockRegUsageLatency },
//    { LatenciesAlonePlusConsecutive, BlockRegUsage }
    };
    for (std::pair<SISchedulerBlockCreatorVariant,
                   SISchedulerBlockSchedulerVariant> v : Variants) {
      Temp = Scheduler.scheduleVariant(v.first, v.second);
      if (Temp.MaxVGPRUsage < Best.MaxVGPRUsage)
        Best = Temp;
    }
  }
  // If VGPR usage is still extremely high, we may spill. Try other
  // variants which perform worse, but that could lead to lower VGPR
  // usage.
  if (Best.MaxVGPRUsage > 200) {
    std::vector<std::pair<SISchedulerBlockCreatorVariant,
                          SISchedulerBlockSchedulerVariant>> Variants = {
//    { LatenciesAlone, BlockRegUsageLatency },
      { LatenciesAlone, BlockRegUsage },
//    { LatenciesGrouped, BlockLatencyRegUsage },
      { LatenciesGrouped, BlockRegUsageLatency },
      { LatenciesGrouped, BlockRegUsage },
//    { LatenciesAlonePlusConsecutive, BlockLatencyRegUsage },
      { LatenciesAlonePlusConsecutive, BlockRegUsageLatency },
      { LatenciesAlonePlusConsecutive, BlockRegUsage }
    };
    for (std::pair<SISchedulerBlockCreatorVariant,
                   SISchedulerBlockSchedulerVariant> v : Variants) {
      Temp = Scheduler.scheduleVariant(v.first, v.second);
      if (Temp.MaxVGPRUsage < Best.MaxVGPRUsage)
        Best = Temp;
    }
  }
#endif
  ScheduledSUnits = Best.SUs;
  ScheduledSUnitsInv.resize(SUnits.size());

  for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
    ScheduledSUnitsInv[ScheduledSUnits[i]] = i;
  }
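
  // Sketch of what moveLowLatencies() does (the positions below are
  // hypothetical): if a low latency load sits at position 10, right
  // before its first user, and the previous low latency user is at
  // position 3, the load is moved up to position 4 (never crossing one
  // of its own predecessors), so several unrelated instructions now
  // execute between the load and its user and help hide its latency.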
  moveLowLatencies();

  // Tell the outside world about the result of the scheduling.

  assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
  TopRPTracker.setPos(CurrentTop);

  for (unsigned NodeNum : ScheduledSUnits) {
    SUnit *SU = &SUnits[NodeNum];

    scheduleMI(SU, true);

    DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                 << *SU->getInstr());
  }

  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
    unsigned BBNum = begin()->getParent()->getNumber();
    dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}