//===- ResourcePriorityQueue.cpp - A DFA-oriented priority queue -*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ResourcePriorityQueue class, which is a
// SchedulingPriorityQueue that prioritizes instructions using DFA state to
// reduce the length of the critical path through the basic block
// on VLIW platforms.
// The scheduler is basically a top-down adaptable list scheduler with DFA
// resource tracking added to the cost function.
// The DFA is queried as a state machine to model "packets/bundles" during
// scheduling. Currently packets/bundles are discarded at the end of
// scheduling, affecting only the order of instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ResourcePriorityQueue.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "scheduler"

static cl::opt<bool> DisableDFASched("disable-dfa-sched", cl::Hidden,
                                     cl::ZeroOrMore, cl::init(false),
                                     cl::desc("Disable use of DFA during scheduling"));

static cl::opt<signed> RegPressureThreshold(
    "dfa-sched-reg-pressure-threshold", cl::Hidden, cl::ZeroOrMore, cl::init(5),
    cl::desc("Track reg pressure and switch priority to in-depth"));

ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS)
    : Picker(this), InstrItins(IS->getTargetLowering()
                                   ->getTargetMachine()
                                   .getSubtargetImpl()
                                   ->getInstrItineraryData()) {
  const TargetMachine &TM = (*IS->MF).getTarget();
  TRI = TM.getSubtargetImpl()->getRegisterInfo();
  TLI = IS->getTargetLowering();
  TII = TM.getSubtargetImpl()->getInstrInfo();
  ResourcesModel = TII->CreateTargetScheduleState(&TM, nullptr);
  // This hard requirement could be relaxed, but for now
  // do not let it proceed.
  assert(ResourcesModel && "Unimplemented CreateTargetScheduleState.");

  unsigned NumRC = TRI->getNumRegClasses();
  RegLimit.resize(NumRC);
  RegPressure.resize(NumRC);
  std::fill(RegLimit.begin(), RegLimit.end(), 0);
  std::fill(RegPressure.begin(), RegPressure.end(), 0);
  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
                                             E = TRI->regclass_end();
       I != E; ++I)
    RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, *IS->MF);

  ParallelLiveRanges = 0;
  HorizontalVerticalBalance = 0;
}

unsigned
ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
  unsigned NumberDeps = 0;
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;

    SUnit *PredSU = I->getSUnit();
    const SDNode *ScegN = PredSU->getNode();

    if (!ScegN)
      continue;

    // If a value is passed to CopyToReg, it is probably
    // live outside the BB.
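    // A CopyFromReg predecessor brings a value into the block, so it counts
    // as an extra contribution to this register class.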
    switch (ScegN->getOpcode()) {
    default:  break;
    case ISD::TokenFactor:    break;
    case ISD::CopyFromReg:    NumberDeps++;  break;
    case ISD::CopyToReg:      break;
    case ISD::INLINEASM:      break;
    }
    if (!ScegN->isMachineOpcode())
      continue;

    for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
      MVT VT = ScegN->getSimpleValueType(i);
      if (TLI->isTypeLegal(VT)
          && (TLI->getRegClassFor(VT)->getID() == RCId)) {
        NumberDeps++;
        break;
      }
    }
  }
  return NumberDeps;
}

unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
                                                    unsigned RCId) {
  unsigned NumberDeps = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;

    SUnit *SuccSU = I->getSUnit();
    const SDNode *ScegN = SuccSU->getNode();
    if (!ScegN)
      continue;

    // If a value is passed to CopyToReg, it is probably
    // live outside the BB.
    switch (ScegN->getOpcode()) {
    default:  break;
    case ISD::TokenFactor:    break;
    case ISD::CopyFromReg:    break;
    case ISD::CopyToReg:      NumberDeps++;  break;
    case ISD::INLINEASM:      break;
    }
    if (!ScegN->isMachineOpcode())
      continue;

    for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
      const SDValue &Op = ScegN->getOperand(i);
      MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
      if (TLI->isTypeLegal(VT)
          && (TLI->getRegClassFor(VT)->getID() == RCId)) {
        NumberDeps++;
        break;
      }
    }
  }
  return NumberDeps;
}

static unsigned numberCtrlDepsInSU(SUnit *SU) {
  unsigned NumberDeps = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    if (I->isCtrl())
      NumberDeps++;

  return NumberDeps;
}

static unsigned numberCtrlPredInSU(SUnit *SU) {
  unsigned NumberDeps = 0;
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (I->isCtrl())
      NumberDeps++;

  return NumberDeps;
}

///
/// Initialize nodes.
///
void ResourcePriorityQueue::initNodes(std::vector<SUnit> &sunits) {
  SUnits = &sunits;
  NumNodesSolelyBlocking.resize(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    initNumRegDefsLeft(SU);
    SU->NodeQueueId = 0;
  }
}

/// This heuristic is used if DFA scheduling is not desired
/// for some VLIW platform.
bool resource_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
  // The isScheduleHigh flag allows nodes with wraparound dependencies that
  // cannot easily be modeled as edges with latencies to be scheduled as
  // soon as possible in a top-down schedule.
  if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
    return false;

  if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
    return true;

  unsigned LHSNum = LHS->NodeNum;
  unsigned RHSNum = RHS->NodeNum;

  // The most important heuristic is scheduling the critical path.
  unsigned LHSLatency = PQ->getLatency(LHSNum);
  unsigned RHSLatency = PQ->getLatency(RHSNum);
  if (LHSLatency < RHSLatency) return true;
  if (LHSLatency > RHSLatency) return false;

  // After that, if two nodes have identical latencies, look to see if one
  // will unblock more nodes than the other.
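  // NumNodesSolelyBlocking counts the successors for which this node is the
  // single unscheduled predecessor, i.e. how many nodes become available
  // once it is scheduled.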
  unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
  unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
  if (LHSBlocked < RHSBlocked) return true;
  if (LHSBlocked > RHSBlocked) return false;

  // Finally, just to provide a stable ordering, use the node number as a
  // deciding factor.
  return LHSNum < RHSNum;
}


/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *ResourcePriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = nullptr;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    SUnit &Pred = *I->getSUnit();
    if (!Pred.isScheduled) {
      // We found an available, but not scheduled, predecessor.  If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
        return nullptr;
      OnlyAvailablePred = &Pred;
    }
  }
  return OnlyAvailablePred;
}

void ResourcePriorityQueue::push(SUnit *SU) {
  // Look at all of the successors of this node.  Count the number of nodes
  // that this node is the sole unscheduled predecessor for.
  unsigned NumNodesBlocking = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    if (getSingleUnscheduledPred(I->getSUnit()) == SU)
      ++NumNodesBlocking;

  NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
  Queue.push_back(SU);
}

/// Check if scheduling of this SU is possible
/// in the current packet.
bool ResourcePriorityQueue::isResourceAvailable(SUnit *SU) {
  if (!SU || !SU->getNode())
    return false;

  // If this is a compound instruction,
  // it is likely to be a call. Do not delay it.
  if (SU->getNode()->getGluedNode())
    return true;

  // First see if the pipeline could receive this instruction
  // in the current cycle.
  if (SU->getNode()->isMachineOpcode())
    switch (SU->getNode()->getMachineOpcode()) {
    default:
      if (!ResourcesModel->canReserveResources(&TII->get(
          SU->getNode()->getMachineOpcode())))
        return false;
    case TargetOpcode::EXTRACT_SUBREG:
    case TargetOpcode::INSERT_SUBREG:
    case TargetOpcode::SUBREG_TO_REG:
    case TargetOpcode::REG_SEQUENCE:
    case TargetOpcode::IMPLICIT_DEF:
      break;
    }

  // Now make sure this SU has no data dependencies on instructions
  // already in the packet.
  for (unsigned i = 0, e = Packet.size(); i != e; ++i)
    for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
         E = Packet[i]->Succs.end(); I != E; ++I) {
      // Since we do not add pseudos to packets, might as well
      // ignore order deps.
      if (I->isCtrl())
        continue;

      if (I->getSUnit() == SU)
        return false;
    }

  return true;
}

/// Keep track of available resources.
void ResourcePriorityQueue::reserveResources(SUnit *SU) {
  // If this SU does not fit in the packet,
  // start a new one.
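  // Glued (compound) nodes, typically calls, also force a packet boundary.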
  if (!isResourceAvailable(SU) || SU->getNode()->getGluedNode()) {
    ResourcesModel->clearResources();
    Packet.clear();
  }

  if (SU->getNode() && SU->getNode()->isMachineOpcode()) {
    switch (SU->getNode()->getMachineOpcode()) {
    default:
      ResourcesModel->reserveResources(&TII->get(
        SU->getNode()->getMachineOpcode()));
      break;
    case TargetOpcode::EXTRACT_SUBREG:
    case TargetOpcode::INSERT_SUBREG:
    case TargetOpcode::SUBREG_TO_REG:
    case TargetOpcode::REG_SEQUENCE:
    case TargetOpcode::IMPLICIT_DEF:
      break;
    }
    Packet.push_back(SU);
  }
  // Forcefully end the packet for PseudoOps.
  else {
    ResourcesModel->clearResources();
    Packet.clear();
  }

  // If the packet is now full, reset the state so that in the next cycle
  // we start fresh.
  if (Packet.size() >= InstrItins->SchedModel->IssueWidth) {
    ResourcesModel->clearResources();
    Packet.clear();
  }
}

signed ResourcePriorityQueue::rawRegPressureDelta(SUnit *SU, unsigned RCId) {
  signed RegBalance = 0;

  if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
    return RegBalance;

  // Gen estimate.
  for (unsigned i = 0, e = SU->getNode()->getNumValues(); i != e; ++i) {
    MVT VT = SU->getNode()->getSimpleValueType(i);
    if (TLI->isTypeLegal(VT)
        && TLI->getRegClassFor(VT)
        && TLI->getRegClassFor(VT)->getID() == RCId)
      RegBalance += numberRCValSuccInSU(SU, RCId);
  }
  // Kill estimate.
  for (unsigned i = 0, e = SU->getNode()->getNumOperands(); i != e; ++i) {
    const SDValue &Op = SU->getNode()->getOperand(i);
    MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
    if (isa<ConstantSDNode>(Op.getNode()))
      continue;

    if (TLI->isTypeLegal(VT) && TLI->getRegClassFor(VT)
        && TLI->getRegClassFor(VT)->getID() == RCId)
      RegBalance -= numberRCValPredInSU(SU, RCId);
  }
  return RegBalance;
}

/// Estimates the change in reg pressure from this SU.
/// This is done by trivially tracking defined
/// and used vregs in dependent instructions.
/// The RawPressure flag makes this function ignore
/// existing reg file sizes and report the raw def/use
/// balance.
signed ResourcePriorityQueue::regPressureDelta(SUnit *SU, bool RawPressure) {
  signed RegBalance = 0;

  if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
    return RegBalance;

  if (RawPressure) {
    for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I) {
      const TargetRegisterClass *RC = *I;
      RegBalance += rawRegPressureDelta(SU, RC->getID());
    }
  }
  else {
    for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I) {
      const TargetRegisterClass *RC = *I;
      if ((RegPressure[RC->getID()] +
           rawRegPressureDelta(SU, RC->getID()) > 0) &&
          (RegPressure[RC->getID()] +
           rawRegPressureDelta(SU, RC->getID()) >= RegLimit[RC->getID()]))
        RegBalance += rawRegPressureDelta(SU, RC->getID());
    }
  }

  return RegBalance;
}

// Constants used to denote the relative importance of
// heuristic components for cost computation.
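// Priority* terms are flat bonuses for special nodes (calls, copies, inline
// asm); Scale* terms weight per-unit quantities such as node height and
// register pressure deltas; FactorOne is the shift applied to the cost when
// resources are available.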
static const unsigned PriorityOne = 200;
static const unsigned PriorityTwo = 50;
static const unsigned PriorityThree = 15;
static const unsigned PriorityFour = 5;
static const unsigned ScaleOne = 20;
static const unsigned ScaleTwo = 10;
static const unsigned ScaleThree = 5;
static const unsigned FactorOne = 2;

/// Returns a single number reflecting the benefit of scheduling SU
/// in the current cycle.
signed ResourcePriorityQueue::SUSchedulingCost(SUnit *SU) {
  // Initial trivial priority.
  signed ResCount = 1;

  // Do not waste time on a node that is already scheduled.
  if (SU->isScheduled)
    return ResCount;

  // Forced priority is high.
  if (SU->isScheduleHigh)
    ResCount += PriorityOne;

  // Adaptable scheduling:
  // a small but very parallel region, where reg pressure is an issue.
  if (HorizontalVerticalBalance > RegPressureThreshold) {
    // Critical path first.
    ResCount += (SU->getHeight() * ScaleTwo);
    // If resources are available for it, multiply the
    // chance of scheduling.
    if (isResourceAvailable(SU))
      ResCount <<= FactorOne;

    // Consider the change to reg pressure from scheduling
    // this SU.
    ResCount -= (regPressureDelta(SU, true) * ScaleOne);
  }
  // Default heuristic, greedy and
  // critical path driven.
  else {
    // Critical path first.
    ResCount += (SU->getHeight() * ScaleTwo);
    // Now see how many instructions are blocked by this SU.
    ResCount += (NumNodesSolelyBlocking[SU->NodeNum] * ScaleTwo);
    // If resources are available for it, multiply the
    // chance of scheduling.
    if (isResourceAvailable(SU))
      ResCount <<= FactorOne;

    ResCount -= (regPressureDelta(SU) * ScaleTwo);
  }

  // These are platform-specific things.
  // They will need to go into the back end
  // and be accessed from here via a hook.
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
    if (N->isMachineOpcode()) {
      const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
      if (TID.isCall())
        ResCount += (PriorityTwo + (ScaleThree * N->getNumValues()));
    }
    else
      switch (N->getOpcode()) {
      default:  break;
      case ISD::TokenFactor:
      case ISD::CopyFromReg:
      case ISD::CopyToReg:
        ResCount += PriorityFour;
        break;

      case ISD::INLINEASM:
        ResCount += PriorityThree;
        break;
      }
  }
  return ResCount;
}


/// Main resource tracking point.
void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
  // Use a NULL entry as an event marker to reset
  // the DFA state.
  if (!SU) {
    ResourcesModel->clearResources();
    Packet.clear();
    return;
  }

  const SDNode *ScegN = SU->getNode();
  // Update reg pressure tracking.
  // First update the current node.
  if (ScegN->isMachineOpcode()) {
    // Estimate generated regs.
    for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
      MVT VT = ScegN->getSimpleValueType(i);

      if (TLI->isTypeLegal(VT)) {
        const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
        if (RC)
          RegPressure[RC->getID()] += numberRCValSuccInSU(SU, RC->getID());
      }
    }
    // Estimate killed regs.
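    // Operands consumed here may end live ranges; clamp the per-class
    // pressure at zero to avoid unsigned underflow.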
    for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
      const SDValue &Op = ScegN->getOperand(i);
      MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());

      if (TLI->isTypeLegal(VT)) {
        const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
        if (RC) {
          if (RegPressure[RC->getID()] >
              (numberRCValPredInSU(SU, RC->getID())))
            RegPressure[RC->getID()] -= numberRCValPredInSU(SU, RC->getID());
          else
            RegPressure[RC->getID()] = 0;
        }
      }
    }
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl() || (I->getSUnit()->NumRegDefsLeft == 0))
        continue;
      --I->getSUnit()->NumRegDefsLeft;
    }
  }

  // Reserve resources for this SU.
  reserveResources(SU);

  // Adjust the number of parallel live ranges.
  // The heuristic is simple: a node with no data successors reduces
  // the number of live ranges; all others increase it.
  unsigned NumberNonControlDeps = 0;

  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    adjustPriorityOfUnscheduledPreds(I->getSUnit());
    if (!I->isCtrl())
      NumberNonControlDeps++;
  }

  if (!NumberNonControlDeps) {
    if (ParallelLiveRanges >= SU->NumPreds)
      ParallelLiveRanges -= SU->NumPreds;
    else
      ParallelLiveRanges = 0;
  }
  else
    ParallelLiveRanges += SU->NumRegDefsLeft;

  // Track parallel live chains.
  HorizontalVerticalBalance += (SU->Succs.size() - numberCtrlDepsInSU(SU));
  HorizontalVerticalBalance -= (SU->Preds.size() - numberCtrlPredInSU(SU));
}

void ResourcePriorityQueue::initNumRegDefsLeft(SUnit *SU) {
  unsigned NodeNumDefs = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
    if (N->isMachineOpcode()) {
      const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
      // No register needs to be allocated for this.
      if (N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
        NodeNumDefs = 0;
        break;
      }
      NodeNumDefs = std::min(N->getNumValues(), TID.getNumDefs());
    }
    else
      switch (N->getOpcode()) {
      default:  break;
      case ISD::CopyFromReg:
        NodeNumDefs++;
        break;
      case ISD::INLINEASM:
        NodeNumDefs++;
        break;
      }

  SU->NumRegDefsLeft = NodeNumDefs;
}

/// adjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
/// scheduled.  If SU is not itself available, then there is at least one
/// predecessor node that has not been scheduled yet.  If SU has exactly ONE
/// unscheduled predecessor, we want to increase its priority: it getting
/// scheduled will make this node available, so it is better than some other
/// node of the same priority that will not make a node available.
void ResourcePriorityQueue::adjustPriorityOfUnscheduledPreds(SUnit *SU) {
  if (SU->isAvailable) return;  // All preds scheduled.

  SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
  if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable)
    return;

  // Okay, we found a single predecessor that is available, but not scheduled.
  // Since it is available, it must be in the priority queue.  First remove it.
  remove(OnlyAvailablePred);

  // Reinsert the node into the priority queue, which recomputes its
  // NumNodesSolelyBlocking value.
  push(OnlyAvailablePred);
}


/// Main access point - returns the next instruction
/// to be placed in the scheduling sequence.
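/// With DFA scheduling enabled this picks the candidate with the highest
/// SUSchedulingCost; with -disable-dfa-sched it falls back to the
/// latency-driven resource_sort comparator.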
SUnit *ResourcePriorityQueue::pop() {
  if (empty())
    return nullptr;

  std::vector<SUnit *>::iterator Best = Queue.begin();
  if (!DisableDFASched) {
    signed BestCost = SUSchedulingCost(*Best);
    for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
           E = Queue.end(); I != E; ++I) {
      if (SUSchedulingCost(*I) > BestCost) {
        BestCost = SUSchedulingCost(*I);
        Best = I;
      }
    }
  }
  // Use the default TD scheduling mechanism.
  else {
    for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
           E = Queue.end(); I != E; ++I)
      if (Picker(*Best, *I))
        Best = I;
  }

  SUnit *V = *Best;
  if (Best != std::prev(Queue.end()))
    std::swap(*Best, Queue.back());

  Queue.pop_back();

  return V;
}


void ResourcePriorityQueue::remove(SUnit *SU) {
  assert(!Queue.empty() && "Queue is empty!");
  std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
  if (I != std::prev(Queue.end()))
    std::swap(*I, Queue.back());

  Queue.pop_back();
}


#ifdef NDEBUG
void ResourcePriorityQueue::dump(ScheduleDAG *DAG) const {}
#else
void ResourcePriorityQueue::dump(ScheduleDAG *DAG) const {
  ResourcePriorityQueue q = *this;
  while (!q.empty()) {
    SUnit *su = q.pop();
    dbgs() << "Height " << su->getHeight() << ": ";
    su->dump(DAG);
  }
}
#endif