//===-- SIMachineScheduler.cpp - SI Scheduler Interface -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Machine Scheduler interface
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "SIMachineScheduler.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <map>
#include <set>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "misched"

// This scheduler implements a different scheduling algorithm than
// GenericScheduler.
//
// There are several architecture-specific behaviours that can't be modelled
// for GenericScheduler:
// . When accessing the result of an SGPR load instruction, you have to wait
//   for all the SGPR load instructions before your current instruction to
//   have finished.
// . When accessing the result of a VGPR load instruction, you have to wait
//   for all the VGPR load instructions previous to the VGPR load instruction
//   you are interested in to finish.
// . The lower the register pressure, the better load latencies are hidden.
//
// Moreover some specificities (like the fact that a lot of instructions in
// the shader have few dependencies) make the generic scheduler behave
// unpredictably. For example when register pressure becomes high, it can
// either manage to prevent register pressure from going too high, or it can
// increase register pressure even more than if it hadn't taken register
// pressure into account.
//
// Also some other bad behaviours are generated, like loading at the beginning
// of the shader a constant in VGPR you won't need until the end of the shader.
//
// The scheduling problem for SI can be divided into three main parts:
// . Hiding high latencies (texture sampling, etc)
// . Hiding low latencies (SGPR constant loading, etc)
// . Keeping register usage low for better latency hiding and general
//   performance
//
// Some other things can also affect performance, but are hard to predict
// (cache usage, the fact the HW can issue several instructions from
// different wavefronts if they are of different types, etc)
//
// This scheduler tries to solve the scheduling problem by dividing it into
// simpler sub-problems. It divides the instructions into blocks, schedules
// locally inside the blocks where it takes care of low latencies, and then
// chooses the order of the blocks by taking care of high latencies.
// Dividing the instructions into blocks makes it easier to keep register
// usage low.
//
// First the instructions are put into blocks.
// We want the blocks to help control register usage and hide high latencies
// later. To help control register usage, we typically want all local
// computations, when for example you create a result that can be consumed
// right away, to be contained in a block. Block inputs and outputs would
// typically be important results that are needed in several locations of
// the shader.
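// For example (a purely illustrative case, not tied to a specific shader):
// an address computation whose intermediate values are each consumed by the
// next instruction would be fully contained in one block, while the loaded
// result it feeds, being reused in several places, would be a block output.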
// Since we do want blocks to help hide high latencies, we want
// the instructions inside the block to have a minimal set of dependencies
// on high latencies. That makes it easy to pick blocks to hide specific
// high latencies.
// The block creation algorithm is divided into several steps, and several
// variants can be tried during the scheduling process.
//
// Second, the order of the instructions inside the blocks is chosen.
// At this step we take into account only register usage and hiding
// low latency instructions.
//
// Third, the block order is chosen; there we try to hide high latencies
// and keep register usage low.
//
// After the third step, a pass is done to improve the hiding of low
// latencies.
//
// When we talk about 'low latency' or 'high latency', we mean both the
// latency for the data to go from the cache (or global memory) to the
// register, and the bandwidth limitations.
// Increasing the number of active wavefronts helps hide the former, but it
// doesn't solve the latter, which is why even if the wavefront count is
// high, we have to try to have as many instructions hiding high latencies
// as possible.
// The OpenCL doc gives for example a latency of 400 cycles for a global
// memory access, which is hidden by 10 instructions if the wavefront count
// is 10.

// Some figures taken from AMD docs:
// Both texture and constant L1 caches are 4-way associative with 64-byte
// lines.
// The constant cache is shared between 4 CUs.
// For texture sampling, the address generation unit receives 4 texture
// addresses per cycle, thus we could expect texture sampling latency to be
// equivalent to 4 instructions in the very best case (a VGPR is 64 work
// items, instructions in a wavefront group are executed every 4 cycles),
// or 16 instructions if the other wavefronts associated to the 3 other
// VALUs of the CU do texture sampling too. (Don't take these figures too
// seriously, as I'm not 100% sure of the computation.)
// Data exports should get similar latency.
// For constant loading, the cache is shared between 4 CUs.
// The doc says "a throughput of 16B/cycle for each of the 4 Compute Unit".
// I guess if the other CUs don't read the cache, it can go up to 64B/cycle.
// It means a simple s_buffer_load should take one instruction to hide, as
// well as an s_buffer_loadx2, and potentially an s_buffer_loadx8 if on the
// same cache line.
//
// As of today the driver doesn't preload the constants in cache, thus the
// first loads get extra latency. The doc says global memory access can be
// 300-600 cycles. We do not specially take that into account when
// scheduling, as we expect the driver to be able to preload the constants
// soon.
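//
// To make the 400-cycle example above concrete (a rough model derived from
// the figures above, not exact hardware behaviour): a wavefront issues an
// instruction on its SIMD every 4 cycles, so with 10 wavefronts resident on
// the SIMD about 40 cycles elapse between two consecutive instructions of
// one wavefront. Scheduling 10 independent instructions between a global
// memory access and its first use therefore covers roughly
// 10 (instructions) * 10 (wavefronts) * 4 (cycles) = 400 cycles.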


// common code //

#ifndef NDEBUG

static const char *getReasonStr(SIScheduleCandReason Reason) {
  switch (Reason) {
  case NoCand: return "NOCAND";
  case RegUsage: return "REGUSAGE";
  case Latency: return "LATENCY";
  case Successor: return "SUCCESSOR";
  case Depth: return "DEPTH";
  case NodeOrder: return "ORDER";
  }
  llvm_unreachable("Unknown reason!");
}

#endif

static bool tryLess(int TryVal, int CandVal,
                    SISchedulerCandidate &TryCand,
                    SISchedulerCandidate &Cand,
                    SIScheduleCandReason Reason) {
  if (TryVal < CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal > CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}

static bool tryGreater(int TryVal, int CandVal,
                       SISchedulerCandidate &TryCand,
                       SISchedulerCandidate &Cand,
                       SIScheduleCandReason Reason) {
  if (TryVal > CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal < CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  Cand.setRepeat(Reason);
  return false;
}

// SIScheduleBlock //

void SIScheduleBlock::addUnit(SUnit *SU) {
  NodeNum2Index[SU->NodeNum] = SUnits.size();
  SUnits.push_back(SU);
}

#ifndef NDEBUG

void SIScheduleBlock::traceCandidate(const SISchedCandidate &Cand) {
  dbgs() << "  SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  dbgs() << '\n';
}
#endif

void SIScheduleBlock::tryCandidateTopDown(SISchedCandidate &Cand,
                                          SISchedCandidate &TryCand) {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  if (Cand.SGPRUsage > 60 &&
      tryLess(TryCand.SGPRUsage, Cand.SGPRUsage, TryCand, Cand, RegUsage))
    return;

  // Schedule low latency instructions as early as possible.
  // The order of priority is:
  // . Low latency instructions which do not depend on other low latency
  //   instructions we haven't waited for
  // . Other instructions which do not depend on low latency instructions
  //   we haven't waited for
  // . Low latencies
  // . All other instructions
  // The goal is to get: low latency instructions - independent instructions
  // - (possibly some more low latency instructions)
  // - instructions that depend on the first low latency instructions.
  // If the block contains a lot of constant loads, SGPR usage can get quite
  // high; above the arbitrary limit of 60 we encourage using the already
  // loaded constants (in order to release some SGPRs) before loading more.
  if (tryLess(TryCand.HasLowLatencyNonWaitedParent,
              Cand.HasLowLatencyNonWaitedParent,
              TryCand, Cand, SIScheduleCandReason::Depth))
    return;

  if (tryGreater(TryCand.IsLowLatency, Cand.IsLowLatency,
                 TryCand, Cand, SIScheduleCandReason::Depth))
    return;

  if (TryCand.IsLowLatency &&
      tryLess(TryCand.LowLatencyOffset, Cand.LowLatencyOffset,
              TryCand, Cand, SIScheduleCandReason::Depth))
    return;

  if (tryLess(TryCand.VGPRUsage, Cand.VGPRUsage, TryCand, Cand, RegUsage))
    return;

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum) {
    TryCand.Reason = NodeOrder;
  }
}
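
// Pick the next SU to schedule among the ready ones: for every SU in
// TopReadySUs, predict the register pressure it would add with
// getDownwardPressure(), and keep the best candidate according to
// tryCandidateTopDown() above.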
SUnit* SIScheduleBlock::pickNode() {
  SISchedCandidate TopCand;

  for (SUnit* SU : TopReadySUs) {
    SISchedCandidate TryCand;
    std::vector<unsigned> pressure;
    std::vector<unsigned> MaxPressure;
    // Predict register usage after this instruction.
    TryCand.SU = SU;
    TopRPTracker.getDownwardPressure(SU->getInstr(), pressure, MaxPressure);
    TryCand.SGPRUsage = pressure[DAG->getSGPRSetID()];
    TryCand.VGPRUsage = pressure[DAG->getVGPRSetID()];
    TryCand.IsLowLatency = DAG->IsLowLatencySU[SU->NodeNum];
    TryCand.LowLatencyOffset = DAG->LowLatencyOffset[SU->NodeNum];
    TryCand.HasLowLatencyNonWaitedParent =
      HasLowLatencyNonWaitedParent[NodeNum2Index[SU->NodeNum]];
    tryCandidateTopDown(TopCand, TryCand);
    if (TryCand.Reason != NoCand)
      TopCand.setBest(TryCand);
  }

  return TopCand.SU;
}


// Schedule something valid.
void SIScheduleBlock::fastSchedule() {
  TopReadySUs.clear();
  if (Scheduled)
    undoSchedule();

  for (SUnit* SU : SUnits) {
    if (!SU->NumPredsLeft)
      TopReadySUs.push_back(SU);
  }

  while (!TopReadySUs.empty()) {
    SUnit *SU = TopReadySUs[0];
    ScheduledSUnits.push_back(SU);
    nodeScheduled(SU);
  }

  Scheduled = true;
}

// Returns true if the register was defined between First and Last.
static bool isDefBetween(unsigned Reg,
                         SlotIndex First, SlotIndex Last,
                         const MachineRegisterInfo *MRI,
                         const LiveIntervals *LIS) {
  for (MachineRegisterInfo::def_instr_iterator
       UI = MRI->def_instr_begin(Reg),
       UE = MRI->def_instr_end(); UI != UE; ++UI) {
    const MachineInstr* MI = &*UI;
    if (MI->isDebugValue())
      continue;
    SlotIndex InstSlot = LIS->getInstructionIndex(*MI).getRegSlot();
    if (InstSlot >= First && InstSlot <= Last)
      return true;
  }
  return false;
}

void SIScheduleBlock::initRegPressure(MachineBasicBlock::iterator BeginBlock,
                                      MachineBasicBlock::iterator EndBlock) {
  IntervalPressure Pressure, BotPressure;
  RegPressureTracker RPTracker(Pressure), BotRPTracker(BotPressure);
  LiveIntervals *LIS = DAG->getLIS();
  MachineRegisterInfo *MRI = DAG->getMRI();
  DAG->initRPTracker(TopRPTracker);
  DAG->initRPTracker(BotRPTracker);
  DAG->initRPTracker(RPTracker);

  // Goes through all SUs. RPTracker captures what had to be alive for the
  // SUs to execute, and what is still alive at the end.
  for (SUnit* SU : ScheduledSUnits) {
    RPTracker.setPos(SU->getInstr());
    RPTracker.advance();
  }

  // Close the RPTracker to finalize live ins/outs.
  RPTracker.closeRegion();

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Do not track physical registers, because it messes up.
  for (const auto &RegMaskPair : RPTracker.getPressure().LiveInRegs) {
    if (TargetRegisterInfo::isVirtualRegister(RegMaskPair.RegUnit))
      LiveInRegs.insert(RegMaskPair.RegUnit);
  }
  LiveOutRegs.clear();
  // There are several cases to distinguish:
  // 1) Reg is not input to any instruction in the block, but is output of
  //    one
  // 2) 1) + read in the block and not needed after it
  // 3) 1) + read in the block but needed in another block
  // 4) Reg is input of an instruction but another block will read it too
  // 5) Reg is input of an instruction and then rewritten in the block.
  //    The result is not read in the block (implies it is used in another
  //    block)
  // 6) Reg is input of an instruction and then rewritten in the block.
  //    The result is read in the block and not needed in another block
  // 7) Reg is input of an instruction and then rewritten in the block.
  //    The result is read in the block but also needed in another block
  // LiveInRegs will contain all the registers in cases 4, 5, 6 and 7.
  // We want LiveOutRegs to contain only the registers whose content is
  // written in the current block and read later by another block, that is
  // cases 1, 3, 5 and 7.
  // Since we packed the MIs of a block together before scheduling, the
  // LiveIntervals were correct, and the RPTracker was able to correctly
  // handle 5 vs 6 and 2 vs 3.
  // (Note: this is not sufficient for the RPTracker to avoid mistakes for
  // case 4.)
  // The RPTracker's LiveOutRegs has 1, 3, 5, 7 and some (correct or
  // incorrect) occurrences of 4.
  // Comparing to LiveInRegs is not sufficient to differentiate 4 from 5
  // and 7. The use of isDefBetween removes case 4.
  for (const auto &RegMaskPair : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = RegMaskPair.RegUnit;
    if (TargetRegisterInfo::isVirtualRegister(Reg) &&
        isDefBetween(Reg, LIS->getInstructionIndex(*BeginBlock).getRegSlot(),
                     LIS->getInstructionIndex(*EndBlock).getRegSlot(), MRI,
                     LIS)) {
      LiveOutRegs.insert(Reg);
    }
  }

  // Pressure = sum_alive_registers register size
  // Internally LLVM will represent some registers as one big 128-bit
  // register, for example, even though it actually corresponds to 4 actual
  // 32-bit registers. Thus Pressure is not equal to
  // num_alive_registers * constant.
  LiveInPressure = TopPressure.MaxSetPressure;
  LiveOutPressure = BotPressure.MaxSetPressure;

  // Prepares TopRPTracker for top down scheduling.
  TopRPTracker.closeTop();
}

void SIScheduleBlock::schedule(MachineBasicBlock::iterator BeginBlock,
                               MachineBasicBlock::iterator EndBlock) {
  if (!Scheduled)
    fastSchedule();

  // PreScheduling phase to set LiveIn and LiveOut.
  initRegPressure(BeginBlock, EndBlock);
  undoSchedule();

  // Schedule for real now.

  TopReadySUs.clear();

  for (SUnit* SU : SUnits) {
    if (!SU->NumPredsLeft)
      TopReadySUs.push_back(SU);
  }

  while (!TopReadySUs.empty()) {
    SUnit *SU = pickNode();
    ScheduledSUnits.push_back(SU);
    TopRPTracker.setPos(SU->getInstr());
    TopRPTracker.advance();
    nodeScheduled(SU);
  }

  // TODO: compute InternalAdditionnalPressure.
  InternalAdditionnalPressure.resize(TopPressure.MaxSetPressure.size());

  // Check everything is right.
#ifndef NDEBUG
  assert(SUnits.size() == ScheduledSUnits.size() &&
         TopReadySUs.empty());
  for (SUnit* SU : SUnits) {
    assert(SU->isScheduled &&
           SU->NumPredsLeft == 0);
  }
#endif

  Scheduled = true;
}

void SIScheduleBlock::undoSchedule() {
  for (SUnit* SU : SUnits) {
    SU->isScheduled = false;
    for (SDep& Succ : SU->Succs) {
      if (BC->isSUInBlock(Succ.getSUnit(), ID))
        undoReleaseSucc(SU, &Succ);
    }
  }
  HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);
  ScheduledSUnits.clear();
  Scheduled = false;
}

void SIScheduleBlock::undoReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    ++SuccSU->WeakPredsLeft;
    return;
  }
  ++SuccSU->NumPredsLeft;
}

void SIScheduleBlock::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(DAG);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif

  --SuccSU->NumPredsLeft;
}

/// Release the successors of SU that are inside the block
/// (InOrOutBlock true) or outside it (InOrOutBlock false).
void SIScheduleBlock::releaseSuccessors(SUnit *SU, bool InOrOutBlock) {
  for (SDep& Succ : SU->Succs) {
    SUnit *SuccSU = Succ.getSUnit();

    if (SuccSU->NodeNum >= DAG->SUnits.size())
      continue;

    if (BC->isSUInBlock(SuccSU, ID) != InOrOutBlock)
      continue;

    releaseSucc(SU, &Succ);
    if (SuccSU->NumPredsLeft == 0 && InOrOutBlock)
      TopReadySUs.push_back(SuccSU);
  }
}

void SIScheduleBlock::nodeScheduled(SUnit *SU) {
  // Is in TopReadySUs
  assert(!SU->NumPredsLeft);
  std::vector<SUnit*>::iterator I =
    std::find(TopReadySUs.begin(), TopReadySUs.end(), SU);
  if (I == TopReadySUs.end()) {
    dbgs() << "Data Structure Bug in SI Scheduler\n";
    llvm_unreachable(nullptr);
  }
  TopReadySUs.erase(I);

  releaseSuccessors(SU, true);
  // Scheduling this node will trigger the wait for its low latency parents,
  // so propagate to the other instructions that they no longer need to
  // wait for them either.
  if (HasLowLatencyNonWaitedParent[NodeNum2Index[SU->NodeNum]])
    HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);

  if (DAG->IsLowLatencySU[SU->NodeNum]) {
    for (SDep& Succ : SU->Succs) {
      std::map<unsigned, unsigned>::iterator I =
        NodeNum2Index.find(Succ.getSUnit()->NodeNum);
      if (I != NodeNum2Index.end())
        HasLowLatencyNonWaitedParent[I->second] = 1;
    }
  }
  SU->isScheduled = true;
}

void SIScheduleBlock::finalizeUnits() {
  // We remove links from outside blocks to enable scheduling inside the
  // block.
  for (SUnit* SU : SUnits) {
    releaseSuccessors(SU, false);
    if (DAG->IsHighLatencySU[SU->NodeNum])
      HighLatencyBlock = true;
  }
  HasLowLatencyNonWaitedParent.resize(SUnits.size(), 0);
}

// We maintain ascending order of IDs.
void SIScheduleBlock::addPred(SIScheduleBlock *Pred) {
  unsigned PredID = Pred->getID();

  // Check if not already predecessor.
  for (SIScheduleBlock* P : Preds) {
    if (PredID == P->getID())
      return;
  }
  Preds.push_back(Pred);

  assert(none_of(Succs,
                 [=](SIScheduleBlock *S) { return PredID == S->getID(); }) &&
         "Loop in the Block Graph!");
}

void SIScheduleBlock::addSucc(SIScheduleBlock *Succ) {
  unsigned SuccID = Succ->getID();

  // Check if not already successor.
  for (SIScheduleBlock* S : Succs) {
    if (SuccID == S->getID())
      return;
  }
  if (Succ->isHighLatencyBlock())
    ++NumHighLatencySuccessors;
  Succs.push_back(Succ);
  assert(none_of(Preds,
                 [=](SIScheduleBlock *P) { return SuccID == P->getID(); }) &&
         "Loop in the Block Graph!");
}

#ifndef NDEBUG
void SIScheduleBlock::printDebug(bool full) {
  dbgs() << "Block (" << ID << ")\n";
  if (!full)
    return;

  dbgs() << "\nContains High Latency Instruction: "
         << HighLatencyBlock << '\n';
  dbgs() << "\nDepends On:\n";
  for (SIScheduleBlock* P : Preds) {
    P->printDebug(false);
  }

  dbgs() << "\nSuccessors:\n";
  for (SIScheduleBlock* S : Succs) {
    S->printDebug(false);
  }

  if (Scheduled) {
    dbgs() << "LiveInPressure " << LiveInPressure[DAG->getSGPRSetID()] << ' '
           << LiveInPressure[DAG->getVGPRSetID()] << '\n';
    dbgs() << "LiveOutPressure " << LiveOutPressure[DAG->getSGPRSetID()] << ' '
           << LiveOutPressure[DAG->getVGPRSetID()] << "\n\n";
    dbgs() << "LiveIns:\n";
    for (unsigned Reg : LiveInRegs)
      dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';

    dbgs() << "\nLiveOuts:\n";
    for (unsigned Reg : LiveOutRegs)
      dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';
  }

  dbgs() << "\nInstructions:\n";
  for (SUnit* SU : SUnits) {
    SU->dump(DAG);
  }

  dbgs() << "///////////////////////\n";
}

#endif

// SIScheduleBlockCreator //

SIScheduleBlockCreator::SIScheduleBlockCreator(SIScheduleDAGMI *DAG) :
DAG(DAG) {
}

SIScheduleBlockCreator::~SIScheduleBlockCreator() {
}

SIScheduleBlocks
SIScheduleBlockCreator::getBlocks(SISchedulerBlockCreatorVariant BlockVariant) {
  std::map<SISchedulerBlockCreatorVariant, SIScheduleBlocks>::iterator B =
    Blocks.find(BlockVariant);
  if (B == Blocks.end()) {
    SIScheduleBlocks Res;
    createBlocksForVariant(BlockVariant);
    topologicalSort();
    scheduleInsideBlocks();
    fillStats();
    Res.Blocks = CurrentBlocks;
    Res.TopDownIndex2Block = TopDownIndex2Block;
    Res.TopDownBlock2Index = TopDownBlock2Index;
    Blocks[BlockVariant] = Res;
    return Res;
  } else {
    return B->second;
  }
}

bool SIScheduleBlockCreator::isSUInBlock(SUnit *SU, unsigned ID) {
  if (SU->NodeNum >= DAG->SUnits.size())
    return false;
  return CurrentBlocks[Node2CurrentBlock[SU->NodeNum]]->getID() == ID;
}

void SIScheduleBlockCreator::colorHighLatenciesAlone() {
  unsigned DAGSize = DAG->SUnits.size();

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    if (DAG->IsHighLatencySU[SU->NodeNum]) {
      CurrentColoring[SU->NodeNum] = NextReservedID++;
    }
  }
}

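// A note on the coloring convention used by the passes below (an
// observation about the existing code, kept here for readability):
// reserved colors (1 .. DAGSize, handed out through NextReservedID) mark
// high latency instructions and their groups, while non-reserved colors
// (> DAGSize, handed out through NextNonReservedID) are used for all other
// groups. This is why the passes skip SUs whose color is <= DAGSize: those
// colors must not be changed.
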
void SIScheduleBlockCreator::colorHighLatenciesGroups() {
  unsigned DAGSize = DAG->SUnits.size();
  unsigned NumHighLatencies = 0;
  unsigned GroupSize;
  unsigned Color = NextReservedID;
  unsigned Count = 0;
  std::set<unsigned> FormingGroup;

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    if (DAG->IsHighLatencySU[SU->NodeNum])
      ++NumHighLatencies;
  }

  if (NumHighLatencies == 0)
    return;

  if (NumHighLatencies <= 6)
    GroupSize = 2;
  else if (NumHighLatencies <= 12)
    GroupSize = 3;
  else
    GroupSize = 4;

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    if (DAG->IsHighLatencySU[SU->NodeNum]) {
      bool CompatibleGroup = true;
      unsigned ProposedColor = Color;
      for (unsigned j : FormingGroup) {
        // TODO: Currently CompatibleGroup will always be false,
        // because the graph enforces the load order. This
        // can be fixed, but keeping the load order is often
        // good for performance, so changing it causes a performance
        // hit (for both the default scheduler and this scheduler).
        // Once this scheduler can determine a good load order,
        // this can be fixed.
        if (!DAG->canAddEdge(SU, &DAG->SUnits[j]) ||
            !DAG->canAddEdge(&DAG->SUnits[j], SU))
          CompatibleGroup = false;
      }
      if (!CompatibleGroup || ++Count == GroupSize) {
        FormingGroup.clear();
        Color = ++NextReservedID;
        if (!CompatibleGroup) {
          ProposedColor = Color;
          FormingGroup.insert(SU->NodeNum);
        }
        Count = 0;
      } else {
        FormingGroup.insert(SU->NodeNum);
      }
      CurrentColoring[SU->NodeNum] = ProposedColor;
    }
  }
}

void SIScheduleBlockCreator::colorComputeReservedDependencies() {
  unsigned DAGSize = DAG->SUnits.size();
  std::map<std::set<unsigned>, unsigned> ColorCombinations;

  CurrentTopDownReservedDependencyColoring.clear();
  CurrentBottomUpReservedDependencyColoring.clear();

  CurrentTopDownReservedDependencyColoring.resize(DAGSize, 0);
  CurrentBottomUpReservedDependencyColoring.resize(DAGSize, 0);

  // Traverse TopDown, and give different colors to SUs depending
  // on which combination of High Latencies they depend on.

  for (unsigned SUNum : DAG->TopDownIndex2SU) {
    SUnit *SU = &DAG->SUnits[SUNum];
    std::set<unsigned> SUColors;

    // Already given.
    if (CurrentColoring[SU->NodeNum]) {
      CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
        CurrentColoring[SU->NodeNum];
      continue;
    }

    for (SDep& PredDep : SU->Preds) {
      SUnit *Pred = PredDep.getSUnit();
      if (PredDep.isWeak() || Pred->NodeNum >= DAGSize)
        continue;
      if (CurrentTopDownReservedDependencyColoring[Pred->NodeNum] > 0)
        SUColors.insert(CurrentTopDownReservedDependencyColoring[Pred->NodeNum]);
    }
    // Color 0 by default.
    if (SUColors.empty())
      continue;
    // Same color as its parents.
    if (SUColors.size() == 1 && *SUColors.begin() > DAGSize)
      CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
        *SUColors.begin();
    else {
      std::map<std::set<unsigned>, unsigned>::iterator Pos =
        ColorCombinations.find(SUColors);
      if (Pos != ColorCombinations.end()) {
        CurrentTopDownReservedDependencyColoring[SU->NodeNum] = Pos->second;
      } else {
        CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
          NextNonReservedID;
        ColorCombinations[SUColors] = NextNonReservedID++;
      }
    }
  }

  ColorCombinations.clear();

  // Same as before, but BottomUp.

  for (unsigned SUNum : DAG->BottomUpIndex2SU) {
    SUnit *SU = &DAG->SUnits[SUNum];
    std::set<unsigned> SUColors;

    // Already given.
    if (CurrentColoring[SU->NodeNum]) {
      CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
        CurrentColoring[SU->NodeNum];
      continue;
    }

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      if (CurrentBottomUpReservedDependencyColoring[Succ->NodeNum] > 0)
        SUColors.insert(CurrentBottomUpReservedDependencyColoring[Succ->NodeNum]);
    }
    // Keep color 0.
    if (SUColors.empty())
      continue;
    // Same color as its successors.
    if (SUColors.size() == 1 && *SUColors.begin() > DAGSize)
      CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
        *SUColors.begin();
    else {
      std::map<std::set<unsigned>, unsigned>::iterator Pos =
        ColorCombinations.find(SUColors);
      if (Pos != ColorCombinations.end()) {
        CurrentBottomUpReservedDependencyColoring[SU->NodeNum] = Pos->second;
      } else {
        CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
          NextNonReservedID;
        ColorCombinations[SUColors] = NextNonReservedID++;
      }
    }
  }
}

void SIScheduleBlockCreator::colorAccordingToReservedDependencies() {
  unsigned DAGSize = DAG->SUnits.size();
  std::map<std::pair<unsigned, unsigned>, unsigned> ColorCombinations;

  // Every combination of colors given by the top-down and bottom-up
  // reserved node dependencies gets its own color.

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    std::pair<unsigned, unsigned> SUColors;

    // High latency instructions: already given.
    if (CurrentColoring[SU->NodeNum])
      continue;

    SUColors.first = CurrentTopDownReservedDependencyColoring[SU->NodeNum];
    SUColors.second = CurrentBottomUpReservedDependencyColoring[SU->NodeNum];

    std::map<std::pair<unsigned, unsigned>, unsigned>::iterator Pos =
      ColorCombinations.find(SUColors);
    if (Pos != ColorCombinations.end()) {
      CurrentColoring[SU->NodeNum] = Pos->second;
    } else {
      CurrentColoring[SU->NodeNum] = NextNonReservedID;
      ColorCombinations[SUColors] = NextNonReservedID++;
    }
  }
}

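// Illustrative example (a hypothetical DAG): with two high latency loads
// L1 and L2, an SU whose only high latency relation is computing an
// address for L2 ends up with a pair (0, c2), where c2 stands for
// "feeds L2 only", while an SU that only consumes the result of L1 ends
// up with (c1, 0). The pairs differ, so the two SUs get different colors
// and land in different blocks, even though neither is high latency
// itself.
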
void SIScheduleBlockCreator::colorEndsAccordingToDependencies() {
  unsigned DAGSize = DAG->SUnits.size();
  std::vector<int> PendingColoring = CurrentColoring;

  for (unsigned SUNum : DAG->BottomUpIndex2SU) {
    SUnit *SU = &DAG->SUnits[SUNum];
    std::set<unsigned> SUColors;
    std::set<unsigned> SUColorsPending;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    if (CurrentBottomUpReservedDependencyColoring[SU->NodeNum] > 0 ||
        CurrentTopDownReservedDependencyColoring[SU->NodeNum] > 0)
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      if (CurrentBottomUpReservedDependencyColoring[Succ->NodeNum] > 0 ||
          CurrentTopDownReservedDependencyColoring[Succ->NodeNum] > 0)
        SUColors.insert(CurrentColoring[Succ->NodeNum]);
      SUColorsPending.insert(PendingColoring[Succ->NodeNum]);
    }
    if (SUColors.size() == 1 && SUColorsPending.size() == 1)
      PendingColoring[SU->NodeNum] = *SUColors.begin();
    else // TODO: Attribute new colors depending on the color
         // combination of children.
      PendingColoring[SU->NodeNum] = NextNonReservedID++;
  }
  CurrentColoring = PendingColoring;
}


void SIScheduleBlockCreator::colorForceConsecutiveOrderInGroup() {
  unsigned DAGSize = DAG->SUnits.size();
  unsigned PreviousColor;
  std::set<unsigned> SeenColors;

  if (DAGSize <= 1)
    return;

  PreviousColor = CurrentColoring[0];

  for (unsigned i = 1, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    unsigned CurrentColor = CurrentColoring[i];
    unsigned PreviousColorSave = PreviousColor;
    assert(i == SU->NodeNum);

    if (CurrentColor != PreviousColor)
      SeenColors.insert(PreviousColor);
    PreviousColor = CurrentColor;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    if (SeenColors.find(CurrentColor) == SeenColors.end())
      continue;

    if (PreviousColorSave != CurrentColor)
      CurrentColoring[i] = NextNonReservedID++;
    else
      CurrentColoring[i] = CurrentColoring[i-1];
  }
}

void SIScheduleBlockCreator::colorMergeConstantLoadsNextGroup() {
  unsigned DAGSize = DAG->SUnits.size();

  for (unsigned SUNum : DAG->BottomUpIndex2SU) {
    SUnit *SU = &DAG->SUnits[SUNum];
    std::set<unsigned> SUColors;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    // No predecessor: VGPR constant loading.
    // Low latency instructions usually have a predecessor (the address).
    if (SU->Preds.size() > 0 && !DAG->IsLowLatencySU[SU->NodeNum])
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      SUColors.insert(CurrentColoring[Succ->NodeNum]);
    }
    if (SUColors.size() == 1)
      CurrentColoring[SU->NodeNum] = *SUColors.begin();
  }
}

void SIScheduleBlockCreator::colorMergeIfPossibleNextGroup() {
  unsigned DAGSize = DAG->SUnits.size();

  for (unsigned SUNum : DAG->BottomUpIndex2SU) {
    SUnit *SU = &DAG->SUnits[SUNum];
    std::set<unsigned> SUColors;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      SUColors.insert(CurrentColoring[Succ->NodeNum]);
    }
    if (SUColors.size() == 1)
      CurrentColoring[SU->NodeNum] = *SUColors.begin();
  }
}

void SIScheduleBlockCreator::colorMergeIfPossibleNextGroupOnlyForReserved() {
  unsigned DAGSize = DAG->SUnits.size();

  for (unsigned SUNum : DAG->BottomUpIndex2SU) {
    SUnit *SU = &DAG->SUnits[SUNum];
    std::set<unsigned> SUColors;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      SUColors.insert(CurrentColoring[Succ->NodeNum]);
    }
    if (SUColors.size() == 1 && *SUColors.begin() <= DAGSize)
      CurrentColoring[SU->NodeNum] = *SUColors.begin();
  }
}

void SIScheduleBlockCreator::colorMergeIfPossibleSmallGroupsToNextGroup() {
  unsigned DAGSize = DAG->SUnits.size();
  std::map<unsigned, unsigned> ColorCount;

  for (unsigned SUNum : DAG->BottomUpIndex2SU) {
    SUnit *SU = &DAG->SUnits[SUNum];
    unsigned color = CurrentColoring[SU->NodeNum];
    std::map<unsigned, unsigned>::iterator Pos = ColorCount.find(color);
    if (Pos != ColorCount.end()) {
      ++ColorCount[color];
    } else {
      ColorCount[color] = 1;
    }
  }

  for (unsigned SUNum : DAG->BottomUpIndex2SU) {
    SUnit *SU = &DAG->SUnits[SUNum];
    unsigned color = CurrentColoring[SU->NodeNum];
    std::set<unsigned> SUColors;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    if (ColorCount[color] > 1)
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      SUColors.insert(CurrentColoring[Succ->NodeNum]);
    }
    if (SUColors.size() == 1 && *SUColors.begin() != color) {
      --ColorCount[color];
      CurrentColoring[SU->NodeNum] = *SUColors.begin();
      ++ColorCount[*SUColors.begin()];
    }
  }
}

void SIScheduleBlockCreator::cutHugeBlocks() {
  // TODO
}

void SIScheduleBlockCreator::regroupNoUserInstructions() {
  unsigned DAGSize = DAG->SUnits.size();
  int GroupID = NextNonReservedID++;

  for (unsigned SUNum : DAG->BottomUpIndex2SU) {
    SUnit *SU = &DAG->SUnits[SUNum];
    bool hasSuccessor = false;

    if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
      continue;

    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      hasSuccessor = true;
    }
    if (!hasSuccessor)
      CurrentColoring[SU->NodeNum] = GroupID;
  }
}

void SIScheduleBlockCreator::createBlocksForVariant(SISchedulerBlockCreatorVariant BlockVariant) {
  unsigned DAGSize = DAG->SUnits.size();
  std::map<unsigned,unsigned> RealID;

  CurrentBlocks.clear();
  CurrentColoring.clear();
  CurrentColoring.resize(DAGSize, 0);
  Node2CurrentBlock.clear();

  // Restore the links that a previous scheduling variant had overridden.
  DAG->restoreSULinksLeft();

  NextReservedID = 1;
  NextNonReservedID = DAGSize + 1;

  DEBUG(dbgs() << "Coloring the graph\n");

  if (BlockVariant == SISchedulerBlockCreatorVariant::LatenciesGrouped)
    colorHighLatenciesGroups();
  else
    colorHighLatenciesAlone();
  colorComputeReservedDependencies();
  colorAccordingToReservedDependencies();
  colorEndsAccordingToDependencies();
  if (BlockVariant == SISchedulerBlockCreatorVariant::LatenciesAlonePlusConsecutive)
    colorForceConsecutiveOrderInGroup();
  regroupNoUserInstructions();
  colorMergeConstantLoadsNextGroup();
  colorMergeIfPossibleNextGroupOnlyForReserved();

  // Put SUs of the same color into the same block.
  Node2CurrentBlock.resize(DAGSize, -1);
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    unsigned Color = CurrentColoring[SU->NodeNum];
    if (RealID.find(Color) == RealID.end()) {
      int ID = CurrentBlocks.size();
      BlockPtrs.push_back(
        make_unique<SIScheduleBlock>(DAG, this, ID));
      CurrentBlocks.push_back(BlockPtrs.rbegin()->get());
      RealID[Color] = ID;
    }
    CurrentBlocks[RealID[Color]]->addUnit(SU);
    Node2CurrentBlock[SU->NodeNum] = RealID[Color];
  }

  // Build dependencies between blocks.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &DAG->SUnits[i];
    int SUID = Node2CurrentBlock[i];
    for (SDep& SuccDep : SU->Succs) {
      SUnit *Succ = SuccDep.getSUnit();
      if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
        continue;
      if (Node2CurrentBlock[Succ->NodeNum] != SUID)
        CurrentBlocks[SUID]->addSucc(CurrentBlocks[Node2CurrentBlock[Succ->NodeNum]]);
    }
    for (SDep& PredDep : SU->Preds) {
      SUnit *Pred = PredDep.getSUnit();
      if (PredDep.isWeak() || Pred->NodeNum >= DAGSize)
        continue;
      if (Node2CurrentBlock[Pred->NodeNum] != SUID)
        CurrentBlocks[SUID]->addPred(CurrentBlocks[Node2CurrentBlock[Pred->NodeNum]]);
    }
  }

  // Free the roots and leaves of all blocks to enable scheduling inside
  // them.
  for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = CurrentBlocks[i];
    Block->finalizeUnits();
  }
  DEBUG(
    dbgs() << "Blocks created:\n\n";
    for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
      SIScheduleBlock *Block = CurrentBlocks[i];
      Block->printDebug(true);
    }
  );
}

// Two functions taken from Codegen/MachineScheduler.cpp

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
  return MachineBasicBlock::instr_iterator(
    const_cast<MachineInstr*>(
      &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
}

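// A note on the algorithm below (describing the existing code): blocks with
// no successor are seeded first and indices are handed out in decreasing
// order, so TopDownIndex2Block ends up listing the blocks in top-down
// topological order (predecessors before successors). TopDownBlock2Index
// is temporarily reused to hold the number of unprocessed successors of
// each block.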
void SIScheduleBlockCreator::topologicalSort() {
  unsigned DAGSize = CurrentBlocks.size();
  std::vector<int> WorkList;

  DEBUG(dbgs() << "Topological Sort\n");

  WorkList.reserve(DAGSize);
  TopDownIndex2Block.resize(DAGSize);
  TopDownBlock2Index.resize(DAGSize);
  BottomUpIndex2Block.resize(DAGSize);

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SIScheduleBlock *Block = CurrentBlocks[i];
    unsigned Degree = Block->getSuccs().size();
    TopDownBlock2Index[i] = Degree;
    if (Degree == 0) {
      WorkList.push_back(i);
    }
  }

  int Id = DAGSize;
  while (!WorkList.empty()) {
    int i = WorkList.back();
    SIScheduleBlock *Block = CurrentBlocks[i];
    WorkList.pop_back();
    TopDownBlock2Index[i] = --Id;
    TopDownIndex2Block[Id] = i;
    for (SIScheduleBlock* Pred : Block->getPreds()) {
      if (!--TopDownBlock2Index[Pred->getID()])
        WorkList.push_back(Pred->getID());
    }
  }

#ifndef NDEBUG
  // Check correctness of the ordering.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SIScheduleBlock *Block = CurrentBlocks[i];
    for (SIScheduleBlock* Pred : Block->getPreds()) {
      assert(TopDownBlock2Index[i] > TopDownBlock2Index[Pred->getID()] &&
             "Wrong Top Down topological sorting");
    }
  }
#endif

  BottomUpIndex2Block = std::vector<int>(TopDownIndex2Block.rbegin(),
                                         TopDownIndex2Block.rend());
}

void SIScheduleBlockCreator::scheduleInsideBlocks() {
  unsigned DAGSize = CurrentBlocks.size();

  DEBUG(dbgs() << "\nScheduling Blocks\n\n");

  // We first schedule a valid ordering, such that each Block corresponds
  // to a contiguous range of instructions.
  DEBUG(dbgs() << "First phase: Fast scheduling for Reg Liveness\n");
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SIScheduleBlock *Block = CurrentBlocks[i];
    Block->fastSchedule();
  }

  // Note: the following code, and the part restoring the previous position,
  // is by far the most expensive operation of the Scheduler.

  // Do not update CurrentTop.
  MachineBasicBlock::iterator CurrentTopFastSched = DAG->getCurrentTop();
  std::vector<MachineBasicBlock::iterator> PosOld;
  std::vector<MachineBasicBlock::iterator> PosNew;
  PosOld.reserve(DAG->SUnits.size());
  PosNew.reserve(DAG->SUnits.size());

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    int BlockIndex = TopDownIndex2Block[i];
    SIScheduleBlock *Block = CurrentBlocks[BlockIndex];
    std::vector<SUnit*> SUs = Block->getScheduledUnits();

    for (SUnit* SU : SUs) {
      MachineInstr *MI = SU->getInstr();
      MachineBasicBlock::iterator Pos = MI;
      PosOld.push_back(Pos);
      if (&*CurrentTopFastSched == MI) {
        PosNew.push_back(Pos);
        CurrentTopFastSched = nextIfDebug(++CurrentTopFastSched,
                                          DAG->getCurrentBottom());
      } else {
        // Update the instruction stream.
        DAG->getBB()->splice(CurrentTopFastSched, DAG->getBB(), MI);

        // Update LiveIntervals.
        // Note: Moving all instructions and calling handleMove every time
        // is the most CPU-intensive operation of the scheduler.
        // It would gain a lot if there was a way to recompute the
        // LiveIntervals for the entire scheduling region.
        DAG->getLIS()->handleMove(*MI, /*UpdateFlags=*/true);
        PosNew.push_back(CurrentTopFastSched);
      }
    }
  }

  // Now we have Block of SUs == Block of MI.
  // We do the final schedule for the instructions inside the block.
  // The property that all the SUs of the Block are grouped together as MI
  // is used for correct reg usage tracking.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SIScheduleBlock *Block = CurrentBlocks[i];
    std::vector<SUnit*> SUs = Block->getScheduledUnits();
    Block->schedule((*SUs.begin())->getInstr(), (*SUs.rbegin())->getInstr());
  }

  DEBUG(dbgs() << "Restoring MI Pos\n");
  // Restore old ordering (which prevents a LIS->handleMove bug).
  for (unsigned i = PosOld.size(), e = 0; i != e; --i) {
    MachineBasicBlock::iterator POld = PosOld[i-1];
    MachineBasicBlock::iterator PNew = PosNew[i-1];
    if (PNew != POld) {
      // Update the instruction stream.
      DAG->getBB()->splice(POld, DAG->getBB(), PNew);

      // Update LiveIntervals.
      DAG->getLIS()->handleMove(*POld, /*UpdateFlags=*/true);
    }
  }

  DEBUG(
    for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
      SIScheduleBlock *Block = CurrentBlocks[i];
      Block->printDebug(true);
    }
  );
}

void SIScheduleBlockCreator::fillStats() {
  unsigned DAGSize = CurrentBlocks.size();

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    int BlockIndex = TopDownIndex2Block[i];
    SIScheduleBlock *Block = CurrentBlocks[BlockIndex];
    if (Block->getPreds().size() == 0)
      Block->Depth = 0;
    else {
      unsigned Depth = 0;
      for (SIScheduleBlock *Pred : Block->getPreds()) {
        if (Depth < Pred->Depth + 1)
          Depth = Pred->Depth + 1;
      }
      Block->Depth = Depth;
    }
  }

  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    int BlockIndex = BottomUpIndex2Block[i];
    SIScheduleBlock *Block = CurrentBlocks[BlockIndex];
    if (Block->getSuccs().size() == 0)
      Block->Height = 0;
    else {
      unsigned Height = 0;
      for (SIScheduleBlock *Succ : Block->getSuccs()) {
        if (Height < Succ->Height + 1)
          Height = Succ->Height + 1;
      }
      Block->Height = Height;
    }
  }
}

// SIScheduleBlockScheduler //

SIScheduleBlockScheduler::SIScheduleBlockScheduler(SIScheduleDAGMI *DAG,
                                                   SISchedulerBlockSchedulerVariant Variant,
                                                   SIScheduleBlocks BlocksStruct) :
  DAG(DAG), Variant(Variant), Blocks(BlocksStruct.Blocks),
  LastPosWaitedHighLatency(0), NumBlockScheduled(0), VregCurrentUsage(0),
  SregCurrentUsage(0), maxVregUsage(0), maxSregUsage(0) {

  // Fill the usage of every output.
  // Warning: while by construction we always have a link between two blocks
  // when one needs a result from the other, the number of users of an
  // output is not simply the number of child blocks that have the same
  // virtual register among their inputs.
  // Here is an example. A produces x and y. B eats x and produces x'.
  // C eats x' and y. The register coalescer may have attributed the same
  // virtual register to x and x'.
  // To count accurately, we do a topological sort. In case the register is
  // found for several parents, we increment the usage of the one with the
  // highest topological index.
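  // In the example above, the use of x by C is thus counted against B (the
  // parent with the higher topological index), not against A.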
  LiveOutRegsNumUsages.resize(Blocks.size());
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = Blocks[i];
    for (unsigned Reg : Block->getInRegs()) {
      bool Found = false;
      int topoInd = -1;
      for (SIScheduleBlock* Pred: Block->getPreds()) {
        std::set<unsigned> PredOutRegs = Pred->getOutRegs();
        std::set<unsigned>::iterator RegPos = PredOutRegs.find(Reg);

        if (RegPos != PredOutRegs.end()) {
          Found = true;
          if (topoInd < BlocksStruct.TopDownBlock2Index[Pred->getID()]) {
            topoInd = BlocksStruct.TopDownBlock2Index[Pred->getID()];
          }
        }
      }

      if (!Found)
        continue;

      int PredID = BlocksStruct.TopDownIndex2Block[topoInd];
      std::map<unsigned, unsigned>::iterator RegPos =
        LiveOutRegsNumUsages[PredID].find(Reg);
      if (RegPos != LiveOutRegsNumUsages[PredID].end()) {
        ++LiveOutRegsNumUsages[PredID][Reg];
      } else {
        LiveOutRegsNumUsages[PredID][Reg] = 1;
      }
    }
  }

  LastPosHighLatencyParentScheduled.resize(Blocks.size(), 0);
  BlockNumPredsLeft.resize(Blocks.size());
  BlockNumSuccsLeft.resize(Blocks.size());

  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = Blocks[i];
    BlockNumPredsLeft[i] = Block->getPreds().size();
    BlockNumSuccsLeft[i] = Block->getSuccs().size();
  }

#ifndef NDEBUG
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = Blocks[i];
    assert(Block->getID() == i);
  }
#endif

  std::set<unsigned> InRegs = DAG->getInRegs();
  addLiveRegs(InRegs);

  // Fill LiveRegsConsumers for regs that were already
  // defined before scheduling.
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = Blocks[i];
    for (unsigned Reg : Block->getInRegs()) {
      bool Found = false;
      for (SIScheduleBlock* Pred: Block->getPreds()) {
        std::set<unsigned> PredOutRegs = Pred->getOutRegs();
        std::set<unsigned>::iterator RegPos = PredOutRegs.find(Reg);

        if (RegPos != PredOutRegs.end()) {
          Found = true;
          break;
        }
      }

      if (!Found) {
        if (LiveRegsConsumers.find(Reg) == LiveRegsConsumers.end())
          LiveRegsConsumers[Reg] = 1;
        else
          ++LiveRegsConsumers[Reg];
      }
    }
  }

  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    SIScheduleBlock *Block = Blocks[i];
    if (BlockNumPredsLeft[i] == 0) {
      ReadyBlocks.push_back(Block);
    }
  }

  while (SIScheduleBlock *Block = pickBlock()) {
    BlocksScheduled.push_back(Block);
    blockScheduled(Block);
  }

  DEBUG(
    dbgs() << "Block Order:";
    for (SIScheduleBlock* Block : BlocksScheduled) {
      dbgs() << ' ' << Block->getID();
    }
  );
}

bool SIScheduleBlockScheduler::tryCandidateLatency(SIBlockSchedCandidate &Cand,
                                                   SIBlockSchedCandidate &TryCand) {
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  // Try to hide high latencies.
  if (tryLess(TryCand.LastPosHighLatParentScheduled,
              Cand.LastPosHighLatParentScheduled, TryCand, Cand, Latency))
    return true;
  // Schedule high latencies early so you can hide them better.
  if (tryGreater(TryCand.IsHighLatency, Cand.IsHighLatency,
                 TryCand, Cand, Latency))
    return true;
  if (TryCand.IsHighLatency && tryGreater(TryCand.Height, Cand.Height,
                                          TryCand, Cand, Depth))
    return true;
  if (tryGreater(TryCand.NumHighLatencySuccessors,
                 Cand.NumHighLatencySuccessors,
                 TryCand, Cand, Successor))
    return true;
  return false;
}

bool SIScheduleBlockScheduler::tryCandidateRegUsage(SIBlockSchedCandidate &Cand,
                                                    SIBlockSchedCandidate &TryCand) {
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  if (tryLess(TryCand.VGPRUsageDiff > 0, Cand.VGPRUsageDiff > 0,
              TryCand, Cand, RegUsage))
    return true;
  if (tryGreater(TryCand.NumSuccessors > 0,
                 Cand.NumSuccessors > 0,
                 TryCand, Cand, Successor))
    return true;
  if (tryGreater(TryCand.Height, Cand.Height, TryCand, Cand, Depth))
    return true;
  if (tryLess(TryCand.VGPRUsageDiff, Cand.VGPRUsageDiff,
              TryCand, Cand, RegUsage))
    return true;
  return false;
}

SIScheduleBlock *SIScheduleBlockScheduler::pickBlock() {
  SIBlockSchedCandidate Cand;
  std::vector<SIScheduleBlock*>::iterator Best;
  SIScheduleBlock *Block;
  if (ReadyBlocks.empty())
    return nullptr;

  DAG->fillVgprSgprCost(LiveRegs.begin(), LiveRegs.end(),
                        VregCurrentUsage, SregCurrentUsage);
  if (VregCurrentUsage > maxVregUsage)
    maxVregUsage = VregCurrentUsage;
  if (SregCurrentUsage > maxSregUsage)
    maxSregUsage = SregCurrentUsage;
  DEBUG(
    dbgs() << "Picking New Blocks\n";
    dbgs() << "Available: ";
    for (SIScheduleBlock* Block : ReadyBlocks)
      dbgs() << Block->getID() << ' ';
    dbgs() << "\nCurrent Live:\n";
    for (unsigned Reg : LiveRegs)
      dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';
    dbgs() << '\n';
    dbgs() << "Current VGPRs: " << VregCurrentUsage << '\n';
    dbgs() << "Current SGPRs: " << SregCurrentUsage << '\n';
  );

  Cand.Block = nullptr;
  for (std::vector<SIScheduleBlock*>::iterator I = ReadyBlocks.begin(),
       E = ReadyBlocks.end(); I != E; ++I) {
    SIBlockSchedCandidate TryCand;
    TryCand.Block = *I;
    TryCand.IsHighLatency = TryCand.Block->isHighLatencyBlock();
    TryCand.VGPRUsageDiff =
      checkRegUsageImpact(TryCand.Block->getInRegs(),
                          TryCand.Block->getOutRegs())[DAG->getVGPRSetID()];
    TryCand.NumSuccessors = TryCand.Block->getSuccs().size();
    TryCand.NumHighLatencySuccessors =
      TryCand.Block->getNumHighLatencySuccessors();
    TryCand.LastPosHighLatParentScheduled =
      (unsigned int) std::max<int>(0,
        LastPosHighLatencyParentScheduled[TryCand.Block->getID()] -
          LastPosWaitedHighLatency);
    TryCand.Height = TryCand.Block->Height;
    // Try not to increase VGPR usage too much, else we may spill.
    if (VregCurrentUsage > 120 ||
        Variant != SISchedulerBlockSchedulerVariant::BlockLatencyRegUsage) {
      if (!tryCandidateRegUsage(Cand, TryCand) &&
          Variant != SISchedulerBlockSchedulerVariant::BlockRegUsage)
        tryCandidateLatency(Cand, TryCand);
    } else {
      if (!tryCandidateLatency(Cand, TryCand))
        tryCandidateRegUsage(Cand, TryCand);
    }
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      Best = I;
      DEBUG(dbgs() << "Best Current Choice: " << Cand.Block->getID() << ' '
                   << getReasonStr(Cand.Reason) << '\n');
    }
  }

  DEBUG(
    dbgs() << "Picking: " << Cand.Block->getID() << '\n';
    dbgs() << "Is a block with high latency instruction: "
           << (Cand.IsHighLatency ? "yes\n" : "no\n");
    dbgs() << "Position of last high latency dependency: "
           << Cand.LastPosHighLatParentScheduled << '\n';
    dbgs() << "VGPRUsageDiff: " << Cand.VGPRUsageDiff << '\n';
    dbgs() << '\n';
  );

  Block = Cand.Block;
  ReadyBlocks.erase(Best);
  return Block;
}

// Tracking of currently alive registers to determine VGPR usage.

void SIScheduleBlockScheduler::addLiveRegs(std::set<unsigned> &Regs) {
  for (unsigned Reg : Regs) {
    // For now only track virtual registers.
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    // If not already in the live set, then add it.
    (void) LiveRegs.insert(Reg);
  }
}

void SIScheduleBlockScheduler::decreaseLiveRegs(SIScheduleBlock *Block,
                                                std::set<unsigned> &Regs) {
  for (unsigned Reg : Regs) {
    // For now only track virtual registers.
    std::set<unsigned>::iterator Pos = LiveRegs.find(Reg);
    assert(Pos != LiveRegs.end() && // Reg must be live.
           LiveRegsConsumers.find(Reg) != LiveRegsConsumers.end() &&
           LiveRegsConsumers[Reg] >= 1);
    --LiveRegsConsumers[Reg];
    if (LiveRegsConsumers[Reg] == 0)
      LiveRegs.erase(Pos);
  }
}

void SIScheduleBlockScheduler::releaseBlockSuccs(SIScheduleBlock *Parent) {
  for (SIScheduleBlock* Block : Parent->getSuccs()) {
    --BlockNumPredsLeft[Block->getID()];
    if (BlockNumPredsLeft[Block->getID()] == 0) {
      ReadyBlocks.push_back(Block);
    }
    // TODO: Improve check. When the dependencies between the high latency
    // instructions and the instructions of the other blocks are WAR or WAW,
    // there will be no wait triggered. We would like these cases to not
    // update LastPosHighLatencyParentScheduled.
    if (Parent->isHighLatencyBlock())
      LastPosHighLatencyParentScheduled[Block->getID()] = NumBlockScheduled;
  }
}

void SIScheduleBlockScheduler::blockScheduled(SIScheduleBlock *Block) {
  decreaseLiveRegs(Block, Block->getInRegs());
  addLiveRegs(Block->getOutRegs());
  releaseBlockSuccs(Block);
  for (std::map<unsigned, unsigned>::iterator RegI =
       LiveOutRegsNumUsages[Block->getID()].begin(),
       E = LiveOutRegsNumUsages[Block->getID()].end(); RegI != E; ++RegI) {
    std::pair<unsigned, unsigned> RegP = *RegI;
    if (LiveRegsConsumers.find(RegP.first) == LiveRegsConsumers.end())
      LiveRegsConsumers[RegP.first] = RegP.second;
    else {
      assert(LiveRegsConsumers[RegP.first] == 0);
      LiveRegsConsumers[RegP.first] += RegP.second;
    }
  }
  if (LastPosHighLatencyParentScheduled[Block->getID()] >
        (unsigned)LastPosWaitedHighLatency)
    LastPosWaitedHighLatency =
      LastPosHighLatencyParentScheduled[Block->getID()];
  ++NumBlockScheduled;
}

std::vector<int>
SIScheduleBlockScheduler::checkRegUsageImpact(std::set<unsigned> &InRegs,
                                              std::set<unsigned> &OutRegs) {
  std::vector<int> DiffSetPressure;
  DiffSetPressure.assign(DAG->getTRI()->getNumRegPressureSets(), 0);

  for (unsigned Reg : InRegs) {
    // For now only track virtual registers.
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (LiveRegsConsumers[Reg] > 1)
      continue;
    PSetIterator PSetI = DAG->getMRI()->getPressureSets(Reg);
    for (; PSetI.isValid(); ++PSetI) {
      DiffSetPressure[*PSetI] -= PSetI.getWeight();
    }
  }

  for (unsigned Reg : OutRegs) {
    // For now only track virtual registers.
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    PSetIterator PSetI = DAG->getMRI()->getPressureSets(Reg);
    for (; PSetI.isValid(); ++PSetI) {
      DiffSetPressure[*PSetI] += PSetI.getWeight();
    }
  }

  return DiffSetPressure;
}

// SIScheduler //

struct SIScheduleBlockResult
SIScheduler::scheduleVariant(SISchedulerBlockCreatorVariant BlockVariant,
                             SISchedulerBlockSchedulerVariant ScheduleVariant) {
  SIScheduleBlocks Blocks = BlockCreator.getBlocks(BlockVariant);
  SIScheduleBlockScheduler Scheduler(DAG, ScheduleVariant, Blocks);
  std::vector<SIScheduleBlock*> ScheduledBlocks;
  struct SIScheduleBlockResult Res;

  ScheduledBlocks = Scheduler.getBlocks();

  for (unsigned b = 0; b < ScheduledBlocks.size(); ++b) {
    SIScheduleBlock *Block = ScheduledBlocks[b];
    std::vector<SUnit*> SUs = Block->getScheduledUnits();

    for (SUnit* SU : SUs)
      Res.SUs.push_back(SU->NodeNum);
  }

  Res.MaxSGPRUsage = Scheduler.getSGPRUsage();
  Res.MaxVGPRUsage = Scheduler.getVGPRUsage();
  return Res;
}

// SIScheduleDAGMI //

SIScheduleDAGMI::SIScheduleDAGMI(MachineSchedContext *C) :
  ScheduleDAGMILive(C, make_unique<GenericScheduler>(C)) {
  SITII = static_cast<const SIInstrInfo*>(TII);
  SITRI = static_cast<const SIRegisterInfo*>(TRI);

  VGPRSetID = SITRI->getVGPR32PressureSet();
  SGPRSetID = SITRI->getSGPR32PressureSet();
}

SIScheduleDAGMI::~SIScheduleDAGMI() {
}

ScheduleDAGInstrs *llvm::createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

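// Note: to be usable, this factory is expected to be registered with a
// MachineSchedRegistry, e.g. from AMDGPUTargetMachine.cpp (an illustrative
// sketch of such a registration, not part of this file):
//
//   static MachineSchedRegistry
//   SISchedRegistry("si-scheduler", "Run SI's custom scheduler",
//                   createSIMachineScheduler);
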
// both the TopDown and BottomUp index vectors are filled from it.
void SIScheduleDAGMI::topologicalSort() {
  Topo.InitDAGTopologicalSorting();

  TopDownIndex2SU = std::vector<int>(Topo.begin(), Topo.end());
  BottomUpIndex2SU = std::vector<int>(Topo.rbegin(), Topo.rend());
}

// Move low latencies further from their user without
// increasing SGPR usage (in general).
// This is to be replaced by a better pass that would
// take into account SGPR usage (based on VGPR usage
// and the corresponding wavefront count), that would
// try to merge groups of loads if it makes sense, etc.
void SIScheduleDAGMI::moveLowLatencies() {
  unsigned DAGSize = SUnits.size();
  int LastLowLatencyUser = -1;
  int LastLowLatencyPos = -1;

  for (unsigned i = 0, e = ScheduledSUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[ScheduledSUnits[i]];
    bool IsLowLatencyUser = false;
    unsigned MinPos = 0;

    for (SDep& PredDep : SU->Preds) {
      SUnit *Pred = PredDep.getSUnit();
      if (SITII->isLowLatencyInstruction(*Pred->getInstr())) {
        IsLowLatencyUser = true;
      }
      if (Pred->NodeNum >= DAGSize)
        continue;
      unsigned PredPos = ScheduledSUnitsInv[Pred->NodeNum];
      if (PredPos >= MinPos)
        MinPos = PredPos + 1;
    }

    if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
      unsigned BestPos = LastLowLatencyUser + 1;
      if ((int)BestPos <= LastLowLatencyPos)
        BestPos = LastLowLatencyPos + 1;
      if (BestPos < MinPos)
        BestPos = MinPos;
      if (BestPos < i) {
        for (unsigned u = i; u > BestPos; --u) {
          ++ScheduledSUnitsInv[ScheduledSUnits[u-1]];
          ScheduledSUnits[u] = ScheduledSUnits[u-1];
        }
        ScheduledSUnits[BestPos] = SU->NodeNum;
        ScheduledSUnitsInv[SU->NodeNum] = BestPos;
      }
      LastLowLatencyPos = BestPos;
      if (IsLowLatencyUser)
        LastLowLatencyUser = BestPos;
    } else if (IsLowLatencyUser) {
      LastLowLatencyUser = i;
    } else if (SU->getInstr()->getOpcode() == AMDGPU::COPY) {
      // Also move the COPY instructions on which the low latency
      // instructions depend.
      bool CopyForLowLat = false;
      for (SDep& SuccDep : SU->Succs) {
        SUnit *Succ = SuccDep.getSUnit();
        if (SITII->isLowLatencyInstruction(*Succ->getInstr())) {
          CopyForLowLat = true;
        }
      }
      if (!CopyForLowLat)
        continue;
      if (MinPos < i) {
        for (unsigned u = i; u > MinPos; --u) {
          ++ScheduledSUnitsInv[ScheduledSUnits[u-1]];
          ScheduledSUnits[u] = ScheduledSUnits[u-1];
        }
        ScheduledSUnits[MinPos] = SU->NodeNum;
        ScheduledSUnitsInv[SU->NodeNum] = MinPos;
      }
    }
  }
}

void SIScheduleDAGMI::restoreSULinksLeft() {
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnits[i].isScheduled = false;
    SUnits[i].WeakPredsLeft = SUnitsLinksBackup[i].WeakPredsLeft;
    SUnits[i].NumPredsLeft = SUnitsLinksBackup[i].NumPredsLeft;
    SUnits[i].WeakSuccsLeft = SUnitsLinksBackup[i].WeakSuccsLeft;
    SUnits[i].NumSuccsLeft = SUnitsLinksBackup[i].NumSuccsLeft;
  }
}

// Return the VGPR and SGPR usage corresponding to some virtual registers.
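// Note that a register may belong to several pressure sets; only the
// weights it contributes to the VGPR32 and SGPR32 pressure sets are
// accumulated here.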
template<typename _Iterator> void
SIScheduleDAGMI::fillVgprSgprCost(_Iterator First, _Iterator End,
                                  unsigned &VgprUsage, unsigned &SgprUsage) {
  VgprUsage = 0;
  SgprUsage = 0;
  for (_Iterator RegI = First; RegI != End; ++RegI) {
    unsigned Reg = *RegI;
    // For now only track virtual registers.
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    PSetIterator PSetI = MRI.getPressureSets(Reg);
    for (; PSetI.isValid(); ++PSetI) {
      if (*PSetI == VGPRSetID)
        VgprUsage += PSetI.getWeight();
      else if (*PSetI == SGPRSetID)
        SgprUsage += PSetI.getWeight();
    }
  }
}

void SIScheduleDAGMI::schedule() {
  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  SIScheduleBlockResult Best, Temp;
  DEBUG(dbgs() << "Preparing Scheduling\n");

  buildDAGWithRegPressure();
  DEBUG(
    for (SUnit& SU : SUnits)
      SU.dumpAll(this)
  );

  topologicalSort();
  findRootsAndBiasEdges(TopRoots, BotRoots);
  // We reuse several ScheduleDAGMI and ScheduleDAGMILive
  // functions, but to make them happy we must initialize
  // the default Scheduler implementation (even if we do not
  // run it).
  SchedImpl->initialize(this);
  initQueues(TopRoots, BotRoots);

  // Fill some stats to help scheduling.

  SUnitsLinksBackup = SUnits;
  IsLowLatencySU.clear();
  LowLatencyOffset.clear();
  IsHighLatencySU.clear();

  IsLowLatencySU.resize(SUnits.size(), 0);
  LowLatencyOffset.resize(SUnits.size(), 0);
  IsHighLatencySU.resize(SUnits.size(), 0);

  for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    unsigned BaseLatReg;
    int64_t OffLatReg;
    if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
      IsLowLatencySU[i] = 1;
      if (SITII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseLatReg, OffLatReg,
                                       TRI))
        LowLatencyOffset[i] = OffLatReg;
    } else if (SITII->isHighLatencyInstruction(*SU->getInstr()))
      IsHighLatencySU[i] = 1;
  }

  SIScheduler Scheduler(this);
  Best = Scheduler.scheduleVariant(SISchedulerBlockCreatorVariant::LatenciesAlone,
                                   SISchedulerBlockSchedulerVariant::BlockLatencyRegUsage);

  // If VGPR usage is extremely high, try other well-performing variants
  // that could lead to lower VGPR usage.
  if (Best.MaxVGPRUsage > 180) {
    std::vector<std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant>> Variants = {
      { LatenciesAlone, BlockRegUsageLatency },
//      { LatenciesAlone, BlockRegUsage },
      { LatenciesGrouped, BlockLatencyRegUsage },
//      { LatenciesGrouped, BlockRegUsageLatency },
//      { LatenciesGrouped, BlockRegUsage },
      { LatenciesAlonePlusConsecutive, BlockLatencyRegUsage },
//      { LatenciesAlonePlusConsecutive, BlockRegUsageLatency },
//      { LatenciesAlonePlusConsecutive, BlockRegUsage }
    };
    for (std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant> v : Variants) {
      Temp = Scheduler.scheduleVariant(v.first, v.second);
      if (Temp.MaxVGPRUsage < Best.MaxVGPRUsage)
        Best = Temp;
    }
  }
  // If VGPR usage is still extremely high, we may spill. Try other variants
  // that perform worse, but could lead to lower VGPR usage.
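  // (Note: the 180 and 200 VGPR thresholds are heuristics. A GCN wavefront
  // can address at most 256 VGPRs, so usage in this range already limits
  // the number of resident wavefronts and is close to the spilling limit.)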
  if (Best.MaxVGPRUsage > 200) {
    std::vector<std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant>> Variants = {
//      { LatenciesAlone, BlockRegUsageLatency },
      { LatenciesAlone, BlockRegUsage },
//      { LatenciesGrouped, BlockLatencyRegUsage },
      { LatenciesGrouped, BlockRegUsageLatency },
      { LatenciesGrouped, BlockRegUsage },
//      { LatenciesAlonePlusConsecutive, BlockLatencyRegUsage },
      { LatenciesAlonePlusConsecutive, BlockRegUsageLatency },
      { LatenciesAlonePlusConsecutive, BlockRegUsage }
    };
    for (std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant> v : Variants) {
      Temp = Scheduler.scheduleVariant(v.first, v.second);
      if (Temp.MaxVGPRUsage < Best.MaxVGPRUsage)
        Best = Temp;
    }
  }

  ScheduledSUnits = Best.SUs;
  ScheduledSUnitsInv.resize(SUnits.size());

  for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
    ScheduledSUnitsInv[ScheduledSUnits[i]] = i;
  }

  moveLowLatencies();

  // Tell the outside world about the result of the scheduling.

  assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
  TopRPTracker.setPos(CurrentTop);

  for (std::vector<unsigned>::iterator I = ScheduledSUnits.begin(),
       E = ScheduledSUnits.end(); I != E; ++I) {
    SUnit *SU = &SUnits[*I];

    scheduleMI(SU, true);

    DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                 << *SU->getInstr());
  }

  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
    unsigned BBNum = begin()->getParent()->getNumber();
    dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}