//===- MachinePipeliner.cpp - Machine Software Pipeliner Pass -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An implementation of the Swing Modulo Scheduling (SMS) software pipeliner.
//
// This SMS implementation is a target-independent back-end pass. When enabled,
// the pass runs just prior to the register allocation pass, while the machine
// IR is in SSA form. If software pipelining is successful, then the original
// loop is replaced by the optimized loop. The optimized loop contains one or
// more prolog blocks, the pipelined kernel, and one or more epilog blocks. If
// the instructions cannot be scheduled in a given MII, we increase the MII by
// one and try again.
//
// The SMS implementation is an extension of the ScheduleDAGInstrs class. We
// represent loop carried dependences in the DAG as order edges to the Phi
// nodes. We also perform several passes over the DAG to eliminate unnecessary
// edges that inhibit the ability to pipeline. The implementation uses the
// DFAPacketizer class to compute the minimum initiation interval and to check
// where an instruction may be inserted in the pipelined schedule.
//
// In order for the SMS pass to work, several target-specific hooks need to be
// implemented to get information about the loop structure and to rewrite
// instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePipeliner.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h" 77 #include "llvm/Support/MathExtras.h" 78 #include "llvm/Support/raw_ostream.h" 79 #include <algorithm> 80 #include <cassert> 81 #include <climits> 82 #include <cstdint> 83 #include <deque> 84 #include <functional> 85 #include <iterator> 86 #include <map> 87 #include <memory> 88 #include <tuple> 89 #include <utility> 90 #include <vector> 91 92 using namespace llvm; 93 94 #define DEBUG_TYPE "pipeliner" 95 96 STATISTIC(NumTrytoPipeline, "Number of loops that we attempt to pipeline"); 97 STATISTIC(NumPipelined, "Number of loops software pipelined"); 98 STATISTIC(NumNodeOrderIssues, "Number of node order issues found"); 99 100 /// A command line option to turn software pipelining on or off. 101 static cl::opt<bool> EnableSWP("enable-pipeliner", cl::Hidden, cl::init(true), 102 cl::ZeroOrMore, 103 cl::desc("Enable Software Pipelining")); 104 105 /// A command line option to enable SWP at -Os. 106 static cl::opt<bool> EnableSWPOptSize("enable-pipeliner-opt-size", 107 cl::desc("Enable SWP at Os."), cl::Hidden, 108 cl::init(false)); 109 110 /// A command line argument to limit minimum initial interval for pipelining. 111 static cl::opt<int> SwpMaxMii("pipeliner-max-mii", 112 cl::desc("Size limit for the MII."), 113 cl::Hidden, cl::init(27)); 114 115 /// A command line argument to limit the number of stages in the pipeline. 116 static cl::opt<int> 117 SwpMaxStages("pipeliner-max-stages", 118 cl::desc("Maximum stages allowed in the generated scheduled."), 119 cl::Hidden, cl::init(3)); 120 121 /// A command line option to disable the pruning of chain dependences due to 122 /// an unrelated Phi. 123 static cl::opt<bool> 124 SwpPruneDeps("pipeliner-prune-deps", 125 cl::desc("Prune dependences between unrelated Phi nodes."), 126 cl::Hidden, cl::init(true)); 127 128 /// A command line option to disable the pruning of loop carried order 129 /// dependences. 130 static cl::opt<bool> 131 SwpPruneLoopCarried("pipeliner-prune-loop-carried", 132 cl::desc("Prune loop carried order dependences."), 133 cl::Hidden, cl::init(true)); 134 135 #ifndef NDEBUG 136 static cl::opt<int> SwpLoopLimit("pipeliner-max", cl::Hidden, cl::init(-1)); 137 #endif 138 139 static cl::opt<bool> SwpIgnoreRecMII("pipeliner-ignore-recmii", 140 cl::ReallyHidden, cl::init(false), 141 cl::ZeroOrMore, cl::desc("Ignore RecMII")); 142 143 namespace llvm { 144 145 // A command line option to enable the CopyToPhi DAG mutation. 146 cl::opt<bool> 147 SwpEnableCopyToPhi("pipeliner-enable-copytophi", cl::ReallyHidden, 148 cl::init(true), cl::ZeroOrMore, 149 cl::desc("Enable CopyToPhi DAG Mutation")); 150 151 } // end namespace llvm 152 153 unsigned SwingSchedulerDAG::Circuits::MaxPaths = 5; 154 char MachinePipeliner::ID = 0; 155 #ifndef NDEBUG 156 int MachinePipeliner::NumTries = 0; 157 #endif 158 char &llvm::MachinePipelinerID = MachinePipeliner::ID; 159 160 INITIALIZE_PASS_BEGIN(MachinePipeliner, DEBUG_TYPE, 161 "Modulo Software Pipelining", false, false) 162 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 163 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo) 164 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) 165 INITIALIZE_PASS_DEPENDENCY(LiveIntervals) 166 INITIALIZE_PASS_END(MachinePipeliner, DEBUG_TYPE, 167 "Modulo Software Pipelining", false, false) 168 169 /// The "main" function for implementing Swing Modulo Scheduling. 
170 bool MachinePipeliner::runOnMachineFunction(MachineFunction &mf) { 171 if (skipFunction(mf.getFunction())) 172 return false; 173 174 if (!EnableSWP) 175 return false; 176 177 if (mf.getFunction().getAttributes().hasAttribute( 178 AttributeList::FunctionIndex, Attribute::OptimizeForSize) && 179 !EnableSWPOptSize.getPosition()) 180 return false; 181 182 MF = &mf; 183 MLI = &getAnalysis<MachineLoopInfo>(); 184 MDT = &getAnalysis<MachineDominatorTree>(); 185 TII = MF->getSubtarget().getInstrInfo(); 186 RegClassInfo.runOnMachineFunction(*MF); 187 188 for (auto &L : *MLI) 189 scheduleLoop(*L); 190 191 return false; 192 } 193 194 /// Attempt to perform the SMS algorithm on the specified loop. This function is 195 /// the main entry point for the algorithm. The function identifies candidate 196 /// loops, calculates the minimum initiation interval, and attempts to schedule 197 /// the loop. 198 bool MachinePipeliner::scheduleLoop(MachineLoop &L) { 199 bool Changed = false; 200 for (auto &InnerLoop : L) 201 Changed |= scheduleLoop(*InnerLoop); 202 203 #ifndef NDEBUG 204 // Stop trying after reaching the limit (if any). 205 int Limit = SwpLoopLimit; 206 if (Limit >= 0) { 207 if (NumTries >= SwpLoopLimit) 208 return Changed; 209 NumTries++; 210 } 211 #endif 212 213 if (!canPipelineLoop(L)) 214 return Changed; 215 216 ++NumTrytoPipeline; 217 218 Changed = swingModuloScheduler(L); 219 220 return Changed; 221 } 222 223 /// Return true if the loop can be software pipelined. The algorithm is 224 /// restricted to loops with a single basic block. Make sure that the 225 /// branch in the loop can be analyzed. 226 bool MachinePipeliner::canPipelineLoop(MachineLoop &L) { 227 if (L.getNumBlocks() != 1) 228 return false; 229 230 // Check if the branch can't be understood because we can't do pipelining 231 // if that's the case. 232 LI.TBB = nullptr; 233 LI.FBB = nullptr; 234 LI.BrCond.clear(); 235 if (TII->analyzeBranch(*L.getHeader(), LI.TBB, LI.FBB, LI.BrCond)) 236 return false; 237 238 LI.LoopInductionVar = nullptr; 239 LI.LoopCompare = nullptr; 240 if (TII->analyzeLoop(L, LI.LoopInductionVar, LI.LoopCompare)) 241 return false; 242 243 if (!L.getLoopPreheader()) 244 return false; 245 246 // Remove any subregisters from inputs to phi nodes. 247 preprocessPhiNodes(*L.getHeader()); 248 return true; 249 } 250 251 void MachinePipeliner::preprocessPhiNodes(MachineBasicBlock &B) { 252 MachineRegisterInfo &MRI = MF->getRegInfo(); 253 SlotIndexes &Slots = *getAnalysis<LiveIntervals>().getSlotIndexes(); 254 255 for (MachineInstr &PI : make_range(B.begin(), B.getFirstNonPHI())) { 256 MachineOperand &DefOp = PI.getOperand(0); 257 assert(DefOp.getSubReg() == 0); 258 auto *RC = MRI.getRegClass(DefOp.getReg()); 259 260 for (unsigned i = 1, n = PI.getNumOperands(); i != n; i += 2) { 261 MachineOperand &RegOp = PI.getOperand(i); 262 if (RegOp.getSubReg() == 0) 263 continue; 264 265 // If the operand uses a subregister, replace it with a new register 266 // without subregisters, and generate a copy to the new register. 
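// For example (illustrative register names), a Phi operand '%10.sub_lo'
// coming from block %bb.1 is rewritten so that %bb.1 gets
// '%new = COPY %10.sub_lo' just before its terminator, and the Phi then
// reads the subregister-free '%new' instead.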
267 unsigned NewReg = MRI.createVirtualRegister(RC); 268 MachineBasicBlock &PredB = *PI.getOperand(i+1).getMBB(); 269 MachineBasicBlock::iterator At = PredB.getFirstTerminator(); 270 const DebugLoc &DL = PredB.findDebugLoc(At); 271 auto Copy = BuildMI(PredB, At, DL, TII->get(TargetOpcode::COPY), NewReg) 272 .addReg(RegOp.getReg(), getRegState(RegOp), 273 RegOp.getSubReg()); 274 Slots.insertMachineInstrInMaps(*Copy); 275 RegOp.setReg(NewReg); 276 RegOp.setSubReg(0); 277 } 278 } 279 } 280 281 /// The SMS algorithm consists of the following main steps: 282 /// 1. Computation and analysis of the dependence graph. 283 /// 2. Ordering of the nodes (instructions). 284 /// 3. Attempt to Schedule the loop. 285 bool MachinePipeliner::swingModuloScheduler(MachineLoop &L) { 286 assert(L.getBlocks().size() == 1 && "SMS works on single blocks only."); 287 288 SwingSchedulerDAG SMS(*this, L, getAnalysis<LiveIntervals>(), RegClassInfo); 289 290 MachineBasicBlock *MBB = L.getHeader(); 291 // The kernel should not include any terminator instructions. These 292 // will be added back later. 293 SMS.startBlock(MBB); 294 295 // Compute the number of 'real' instructions in the basic block by 296 // ignoring terminators. 297 unsigned size = MBB->size(); 298 for (MachineBasicBlock::iterator I = MBB->getFirstTerminator(), 299 E = MBB->instr_end(); 300 I != E; ++I, --size) 301 ; 302 303 SMS.enterRegion(MBB, MBB->begin(), MBB->getFirstTerminator(), size); 304 SMS.schedule(); 305 SMS.exitRegion(); 306 307 SMS.finishBlock(); 308 return SMS.hasNewSchedule(); 309 } 310 311 /// We override the schedule function in ScheduleDAGInstrs to implement the 312 /// scheduling part of the Swing Modulo Scheduling algorithm. 313 void SwingSchedulerDAG::schedule() { 314 AliasAnalysis *AA = &Pass.getAnalysis<AAResultsWrapperPass>().getAAResults(); 315 buildSchedGraph(AA); 316 addLoopCarriedDependences(AA); 317 updatePhiDependences(); 318 Topo.InitDAGTopologicalSorting(); 319 changeDependences(); 320 postprocessDAG(); 321 LLVM_DEBUG(dump()); 322 323 NodeSetType NodeSets; 324 findCircuits(NodeSets); 325 NodeSetType Circuits = NodeSets; 326 327 // Calculate the MII. 328 unsigned ResMII = calculateResMII(); 329 unsigned RecMII = calculateRecMII(NodeSets); 330 331 fuseRecs(NodeSets); 332 333 // This flag is used for testing and can cause correctness problems. 334 if (SwpIgnoreRecMII) 335 RecMII = 0; 336 337 MII = std::max(ResMII, RecMII); 338 LLVM_DEBUG(dbgs() << "MII = " << MII << " (rec=" << RecMII 339 << ", res=" << ResMII << ")\n"); 340 341 // Can't schedule a loop without a valid MII. 342 if (MII == 0) 343 return; 344 345 // Don't pipeline large loops. 
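// For example, with the default -pipeliner-max-mii=27, a loop whose computed
// MII is 30 is rejected here rather than searched for a schedule.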
346 if (SwpMaxMii != -1 && (int)MII > SwpMaxMii) 347 return; 348 349 computeNodeFunctions(NodeSets); 350 351 registerPressureFilter(NodeSets); 352 353 colocateNodeSets(NodeSets); 354 355 checkNodeSets(NodeSets); 356 357 LLVM_DEBUG({ 358 for (auto &I : NodeSets) { 359 dbgs() << " Rec NodeSet "; 360 I.dump(); 361 } 362 }); 363 364 std::stable_sort(NodeSets.begin(), NodeSets.end(), std::greater<NodeSet>()); 365 366 groupRemainingNodes(NodeSets); 367 368 removeDuplicateNodes(NodeSets); 369 370 LLVM_DEBUG({ 371 for (auto &I : NodeSets) { 372 dbgs() << " NodeSet "; 373 I.dump(); 374 } 375 }); 376 377 computeNodeOrder(NodeSets); 378 379 // check for node order issues 380 checkValidNodeOrder(Circuits); 381 382 SMSchedule Schedule(Pass.MF); 383 Scheduled = schedulePipeline(Schedule); 384 385 if (!Scheduled) 386 return; 387 388 unsigned numStages = Schedule.getMaxStageCount(); 389 // No need to generate pipeline if there are no overlapped iterations. 390 if (numStages == 0) 391 return; 392 393 // Check that the maximum stage count is less than user-defined limit. 394 if (SwpMaxStages > -1 && (int)numStages > SwpMaxStages) 395 return; 396 397 generatePipelinedLoop(Schedule); 398 ++NumPipelined; 399 } 400 401 /// Clean up after the software pipeliner runs. 402 void SwingSchedulerDAG::finishBlock() { 403 for (MachineInstr *I : NewMIs) 404 MF.DeleteMachineInstr(I); 405 NewMIs.clear(); 406 407 // Call the superclass. 408 ScheduleDAGInstrs::finishBlock(); 409 } 410 411 /// Return the register values for the operands of a Phi instruction. 412 /// This function assume the instruction is a Phi. 413 static void getPhiRegs(MachineInstr &Phi, MachineBasicBlock *Loop, 414 unsigned &InitVal, unsigned &LoopVal) { 415 assert(Phi.isPHI() && "Expecting a Phi."); 416 417 InitVal = 0; 418 LoopVal = 0; 419 for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2) 420 if (Phi.getOperand(i + 1).getMBB() != Loop) 421 InitVal = Phi.getOperand(i).getReg(); 422 else 423 LoopVal = Phi.getOperand(i).getReg(); 424 425 assert(InitVal != 0 && LoopVal != 0 && "Unexpected Phi structure."); 426 } 427 428 /// Return the Phi register value that comes from the incoming block. 429 static unsigned getInitPhiReg(MachineInstr &Phi, MachineBasicBlock *LoopBB) { 430 for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2) 431 if (Phi.getOperand(i + 1).getMBB() != LoopBB) 432 return Phi.getOperand(i).getReg(); 433 return 0; 434 } 435 436 /// Return the Phi register value that comes the loop block. 437 static unsigned getLoopPhiReg(MachineInstr &Phi, MachineBasicBlock *LoopBB) { 438 for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2) 439 if (Phi.getOperand(i + 1).getMBB() == LoopBB) 440 return Phi.getOperand(i).getReg(); 441 return 0; 442 } 443 444 /// Return true if SUb can be reached from SUa following the chain edges. 445 static bool isSuccOrder(SUnit *SUa, SUnit *SUb) { 446 SmallPtrSet<SUnit *, 8> Visited; 447 SmallVector<SUnit *, 8> Worklist; 448 Worklist.push_back(SUa); 449 while (!Worklist.empty()) { 450 const SUnit *SU = Worklist.pop_back_val(); 451 for (auto &SI : SU->Succs) { 452 SUnit *SuccSU = SI.getSUnit(); 453 if (SI.getKind() == SDep::Order) { 454 if (Visited.count(SuccSU)) 455 continue; 456 if (SuccSU == SUb) 457 return true; 458 Worklist.push_back(SuccSU); 459 Visited.insert(SuccSU); 460 } 461 } 462 } 463 return false; 464 } 465 466 /// Return true if the instruction causes a chain between memory 467 /// references before and after it. 
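/// Calls, instructions with unmodeled side effects, and ordered (e.g. volatile
/// or atomic) memory references that are not invariant loads are all treated
/// as such barriers.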
468 static bool isDependenceBarrier(MachineInstr &MI, AliasAnalysis *AA) { 469 return MI.isCall() || MI.hasUnmodeledSideEffects() || 470 (MI.hasOrderedMemoryRef() && 471 (!MI.mayLoad() || !MI.isDereferenceableInvariantLoad(AA))); 472 } 473 474 /// Return the underlying objects for the memory references of an instruction. 475 /// This function calls the code in ValueTracking, but first checks that the 476 /// instruction has a memory operand. 477 static void getUnderlyingObjects(MachineInstr *MI, 478 SmallVectorImpl<Value *> &Objs, 479 const DataLayout &DL) { 480 if (!MI->hasOneMemOperand()) 481 return; 482 MachineMemOperand *MM = *MI->memoperands_begin(); 483 if (!MM->getValue()) 484 return; 485 GetUnderlyingObjects(const_cast<Value *>(MM->getValue()), Objs, DL); 486 for (Value *V : Objs) { 487 if (!isIdentifiedObject(V)) { 488 Objs.clear(); 489 return; 490 } 491 Objs.push_back(V); 492 } 493 } 494 495 /// Add a chain edge between a load and store if the store can be an 496 /// alias of the load on a subsequent iteration, i.e., a loop carried 497 /// dependence. This code is very similar to the code in ScheduleDAGInstrs 498 /// but that code doesn't create loop carried dependences. 499 void SwingSchedulerDAG::addLoopCarriedDependences(AliasAnalysis *AA) { 500 MapVector<Value *, SmallVector<SUnit *, 4>> PendingLoads; 501 Value *UnknownValue = 502 UndefValue::get(Type::getVoidTy(MF.getFunction().getContext())); 503 for (auto &SU : SUnits) { 504 MachineInstr &MI = *SU.getInstr(); 505 if (isDependenceBarrier(MI, AA)) 506 PendingLoads.clear(); 507 else if (MI.mayLoad()) { 508 SmallVector<Value *, 4> Objs; 509 getUnderlyingObjects(&MI, Objs, MF.getDataLayout()); 510 if (Objs.empty()) 511 Objs.push_back(UnknownValue); 512 for (auto V : Objs) { 513 SmallVector<SUnit *, 4> &SUs = PendingLoads[V]; 514 SUs.push_back(&SU); 515 } 516 } else if (MI.mayStore()) { 517 SmallVector<Value *, 4> Objs; 518 getUnderlyingObjects(&MI, Objs, MF.getDataLayout()); 519 if (Objs.empty()) 520 Objs.push_back(UnknownValue); 521 for (auto V : Objs) { 522 MapVector<Value *, SmallVector<SUnit *, 4>>::iterator I = 523 PendingLoads.find(V); 524 if (I == PendingLoads.end()) 525 continue; 526 for (auto Load : I->second) { 527 if (isSuccOrder(Load, &SU)) 528 continue; 529 MachineInstr &LdMI = *Load->getInstr(); 530 // First, perform the cheaper check that compares the base register. 531 // If they are the same and the load offset is less than the store 532 // offset, then mark the dependence as loop carried potentially. 533 MachineOperand *BaseOp1, *BaseOp2; 534 int64_t Offset1, Offset2; 535 if (TII->getMemOperandWithOffset(LdMI, BaseOp1, Offset1, TRI) && 536 TII->getMemOperandWithOffset(MI, BaseOp2, Offset2, TRI)) { 537 if (BaseOp1->isIdenticalTo(*BaseOp2) && 538 (int)Offset1 < (int)Offset2) { 539 assert(TII->areMemAccessesTriviallyDisjoint(LdMI, MI, AA) && 540 "What happened to the chain edge?"); 541 SDep Dep(Load, SDep::Barrier); 542 Dep.setLatency(1); 543 SU.addPred(Dep); 544 continue; 545 } 546 } 547 // Second, the more expensive check that uses alias analysis on the 548 // base registers. If they alias, and the load offset is less than 549 // the store offset, the mark the dependence as loop carried. 
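// If alias analysis is unavailable, or either memory operand has no IR value
// to query, the code below conservatively adds the loop-carried barrier edge.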
550 if (!AA) { 551 SDep Dep(Load, SDep::Barrier); 552 Dep.setLatency(1); 553 SU.addPred(Dep); 554 continue; 555 } 556 MachineMemOperand *MMO1 = *LdMI.memoperands_begin(); 557 MachineMemOperand *MMO2 = *MI.memoperands_begin(); 558 if (!MMO1->getValue() || !MMO2->getValue()) { 559 SDep Dep(Load, SDep::Barrier); 560 Dep.setLatency(1); 561 SU.addPred(Dep); 562 continue; 563 } 564 if (MMO1->getValue() == MMO2->getValue() && 565 MMO1->getOffset() <= MMO2->getOffset()) { 566 SDep Dep(Load, SDep::Barrier); 567 Dep.setLatency(1); 568 SU.addPred(Dep); 569 continue; 570 } 571 AliasResult AAResult = AA->alias( 572 MemoryLocation(MMO1->getValue(), LocationSize::unknown(), 573 MMO1->getAAInfo()), 574 MemoryLocation(MMO2->getValue(), LocationSize::unknown(), 575 MMO2->getAAInfo())); 576 577 if (AAResult != NoAlias) { 578 SDep Dep(Load, SDep::Barrier); 579 Dep.setLatency(1); 580 SU.addPred(Dep); 581 } 582 } 583 } 584 } 585 } 586 } 587 588 /// Update the phi dependences to the DAG because ScheduleDAGInstrs no longer 589 /// processes dependences for PHIs. This function adds true dependences 590 /// from a PHI to a use, and a loop carried dependence from the use to the 591 /// PHI. The loop carried dependence is represented as an anti dependence 592 /// edge. This function also removes chain dependences between unrelated 593 /// PHIs. 594 void SwingSchedulerDAG::updatePhiDependences() { 595 SmallVector<SDep, 4> RemoveDeps; 596 const TargetSubtargetInfo &ST = MF.getSubtarget<TargetSubtargetInfo>(); 597 598 // Iterate over each DAG node. 599 for (SUnit &I : SUnits) { 600 RemoveDeps.clear(); 601 // Set to true if the instruction has an operand defined by a Phi. 602 unsigned HasPhiUse = 0; 603 unsigned HasPhiDef = 0; 604 MachineInstr *MI = I.getInstr(); 605 // Iterate over each operand, and we process the definitions. 606 for (MachineInstr::mop_iterator MOI = MI->operands_begin(), 607 MOE = MI->operands_end(); 608 MOI != MOE; ++MOI) { 609 if (!MOI->isReg()) 610 continue; 611 unsigned Reg = MOI->getReg(); 612 if (MOI->isDef()) { 613 // If the register is used by a Phi, then create an anti dependence. 614 for (MachineRegisterInfo::use_instr_iterator 615 UI = MRI.use_instr_begin(Reg), 616 UE = MRI.use_instr_end(); 617 UI != UE; ++UI) { 618 MachineInstr *UseMI = &*UI; 619 SUnit *SU = getSUnit(UseMI); 620 if (SU != nullptr && UseMI->isPHI()) { 621 if (!MI->isPHI()) { 622 SDep Dep(SU, SDep::Anti, Reg); 623 Dep.setLatency(1); 624 I.addPred(Dep); 625 } else { 626 HasPhiDef = Reg; 627 // Add a chain edge to a dependent Phi that isn't an existing 628 // predecessor. 629 if (SU->NodeNum < I.NodeNum && !I.isPred(SU)) 630 I.addPred(SDep(SU, SDep::Barrier)); 631 } 632 } 633 } 634 } else if (MOI->isUse()) { 635 // If the register is defined by a Phi, then create a true dependence. 636 MachineInstr *DefMI = MRI.getUniqueVRegDef(Reg); 637 if (DefMI == nullptr) 638 continue; 639 SUnit *SU = getSUnit(DefMI); 640 if (SU != nullptr && DefMI->isPHI()) { 641 if (!MI->isPHI()) { 642 SDep Dep(SU, SDep::Data, Reg); 643 Dep.setLatency(0); 644 ST.adjustSchedDependency(SU, &I, Dep); 645 I.addPred(Dep); 646 } else { 647 HasPhiUse = Reg; 648 // Add a chain edge to a dependent Phi that isn't an existing 649 // predecessor. 650 if (SU->NodeNum < I.NodeNum && !I.isPred(SU)) 651 I.addPred(SDep(SU, SDep::Barrier)); 652 } 653 } 654 } 655 } 656 // Remove order dependences from an unrelated Phi. 
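// (Enabled by default via -pipeliner-prune-deps.) Order edges coming from a
// Phi are dropped unless this node is itself a Phi that either uses the value
// the predecessor Phi defines or supplies that Phi's loop-carried input.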
657 if (!SwpPruneDeps) 658 continue; 659 for (auto &PI : I.Preds) { 660 MachineInstr *PMI = PI.getSUnit()->getInstr(); 661 if (PMI->isPHI() && PI.getKind() == SDep::Order) { 662 if (I.getInstr()->isPHI()) { 663 if (PMI->getOperand(0).getReg() == HasPhiUse) 664 continue; 665 if (getLoopPhiReg(*PMI, PMI->getParent()) == HasPhiDef) 666 continue; 667 } 668 RemoveDeps.push_back(PI); 669 } 670 } 671 for (int i = 0, e = RemoveDeps.size(); i != e; ++i) 672 I.removePred(RemoveDeps[i]); 673 } 674 } 675 676 /// Iterate over each DAG node and see if we can change any dependences 677 /// in order to reduce the recurrence MII. 678 void SwingSchedulerDAG::changeDependences() { 679 // See if an instruction can use a value from the previous iteration. 680 // If so, we update the base and offset of the instruction and change 681 // the dependences. 682 for (SUnit &I : SUnits) { 683 unsigned BasePos = 0, OffsetPos = 0, NewBase = 0; 684 int64_t NewOffset = 0; 685 if (!canUseLastOffsetValue(I.getInstr(), BasePos, OffsetPos, NewBase, 686 NewOffset)) 687 continue; 688 689 // Get the MI and SUnit for the instruction that defines the original base. 690 unsigned OrigBase = I.getInstr()->getOperand(BasePos).getReg(); 691 MachineInstr *DefMI = MRI.getUniqueVRegDef(OrigBase); 692 if (!DefMI) 693 continue; 694 SUnit *DefSU = getSUnit(DefMI); 695 if (!DefSU) 696 continue; 697 // Get the MI and SUnit for the instruction that defins the new base. 698 MachineInstr *LastMI = MRI.getUniqueVRegDef(NewBase); 699 if (!LastMI) 700 continue; 701 SUnit *LastSU = getSUnit(LastMI); 702 if (!LastSU) 703 continue; 704 705 if (Topo.IsReachable(&I, LastSU)) 706 continue; 707 708 // Remove the dependence. The value now depends on a prior iteration. 709 SmallVector<SDep, 4> Deps; 710 for (SUnit::pred_iterator P = I.Preds.begin(), E = I.Preds.end(); P != E; 711 ++P) 712 if (P->getSUnit() == DefSU) 713 Deps.push_back(*P); 714 for (int i = 0, e = Deps.size(); i != e; i++) { 715 Topo.RemovePred(&I, Deps[i].getSUnit()); 716 I.removePred(Deps[i]); 717 } 718 // Remove the chain dependence between the instructions. 719 Deps.clear(); 720 for (auto &P : LastSU->Preds) 721 if (P.getSUnit() == &I && P.getKind() == SDep::Order) 722 Deps.push_back(P); 723 for (int i = 0, e = Deps.size(); i != e; i++) { 724 Topo.RemovePred(LastSU, Deps[i].getSUnit()); 725 LastSU->removePred(Deps[i]); 726 } 727 728 // Add a dependence between the new instruction and the instruction 729 // that defines the new base. 730 SDep Dep(&I, SDep::Anti, NewBase); 731 Topo.AddPred(LastSU, &I); 732 LastSU->addPred(Dep); 733 734 // Remember the base and offset information so that we can update the 735 // instruction during code generation. 736 InstrChanges[&I] = std::make_pair(NewBase, NewOffset); 737 } 738 } 739 740 namespace { 741 742 // FuncUnitSorter - Comparison operator used to sort instructions by 743 // the number of functional unit choices. 744 struct FuncUnitSorter { 745 const InstrItineraryData *InstrItins; 746 DenseMap<unsigned, unsigned> Resources; 747 748 FuncUnitSorter(const InstrItineraryData *IID) : InstrItins(IID) {} 749 750 // Compute the number of functional unit alternatives needed 751 // at each stage, and take the minimum value. We prioritize the 752 // instructions by the least number of choices first. 
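// For example, an instruction whose most constrained itinerary stage can issue
// on either of two functional units has two alternatives, while one restricted
// to a single unit has one and is ordered ahead of it when reserving resources.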
753 unsigned minFuncUnits(const MachineInstr *Inst, unsigned &F) const { 754 unsigned schedClass = Inst->getDesc().getSchedClass(); 755 unsigned min = UINT_MAX; 756 for (const InstrStage *IS = InstrItins->beginStage(schedClass), 757 *IE = InstrItins->endStage(schedClass); 758 IS != IE; ++IS) { 759 unsigned funcUnits = IS->getUnits(); 760 unsigned numAlternatives = countPopulation(funcUnits); 761 if (numAlternatives < min) { 762 min = numAlternatives; 763 F = funcUnits; 764 } 765 } 766 return min; 767 } 768 769 // Compute the critical resources needed by the instruction. This 770 // function records the functional units needed by instructions that 771 // must use only one functional unit. We use this as a tie breaker 772 // for computing the resource MII. The instrutions that require 773 // the same, highly used, functional unit have high priority. 774 void calcCriticalResources(MachineInstr &MI) { 775 unsigned SchedClass = MI.getDesc().getSchedClass(); 776 for (const InstrStage *IS = InstrItins->beginStage(SchedClass), 777 *IE = InstrItins->endStage(SchedClass); 778 IS != IE; ++IS) { 779 unsigned FuncUnits = IS->getUnits(); 780 if (countPopulation(FuncUnits) == 1) 781 Resources[FuncUnits]++; 782 } 783 } 784 785 /// Return true if IS1 has less priority than IS2. 786 bool operator()(const MachineInstr *IS1, const MachineInstr *IS2) const { 787 unsigned F1 = 0, F2 = 0; 788 unsigned MFUs1 = minFuncUnits(IS1, F1); 789 unsigned MFUs2 = minFuncUnits(IS2, F2); 790 if (MFUs1 == 1 && MFUs2 == 1) 791 return Resources.lookup(F1) < Resources.lookup(F2); 792 return MFUs1 > MFUs2; 793 } 794 }; 795 796 } // end anonymous namespace 797 798 /// Calculate the resource constrained minimum initiation interval for the 799 /// specified loop. We use the DFA to model the resources needed for 800 /// each instruction, and we ignore dependences. A different DFA is created 801 /// for each cycle that is required. When adding a new instruction, we attempt 802 /// to add it to each existing DFA, until a legal space is found. If the 803 /// instruction cannot be reserved in an existing DFA, we create a new one. 804 unsigned SwingSchedulerDAG::calculateResMII() { 805 SmallVector<DFAPacketizer *, 8> Resources; 806 MachineBasicBlock *MBB = Loop.getHeader(); 807 Resources.push_back(TII->CreateTargetScheduleState(MF.getSubtarget())); 808 809 // Sort the instructions by the number of available choices for scheduling, 810 // least to most. Use the number of critical resources as the tie breaker. 811 FuncUnitSorter FUS = 812 FuncUnitSorter(MF.getSubtarget().getInstrItineraryData()); 813 for (MachineBasicBlock::iterator I = MBB->getFirstNonPHI(), 814 E = MBB->getFirstTerminator(); 815 I != E; ++I) 816 FUS.calcCriticalResources(*I); 817 PriorityQueue<MachineInstr *, std::vector<MachineInstr *>, FuncUnitSorter> 818 FuncUnitOrder(FUS); 819 820 for (MachineBasicBlock::iterator I = MBB->getFirstNonPHI(), 821 E = MBB->getFirstTerminator(); 822 I != E; ++I) 823 FuncUnitOrder.push(&*I); 824 825 while (!FuncUnitOrder.empty()) { 826 MachineInstr *MI = FuncUnitOrder.top(); 827 FuncUnitOrder.pop(); 828 if (TII->isZeroCost(MI->getOpcode())) 829 continue; 830 // Attempt to reserve the instruction in an existing DFA. At least one 831 // DFA is needed for each cycle. 
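// For example, an instruction with latency 2 needs a reservation in two DFAs;
// if the existing ones are full, new DFAs are created below, and the final
// number of DFAs is what calculateResMII returns as the resource MII.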
832 unsigned NumCycles = getSUnit(MI)->Latency; 833 unsigned ReservedCycles = 0; 834 SmallVectorImpl<DFAPacketizer *>::iterator RI = Resources.begin(); 835 SmallVectorImpl<DFAPacketizer *>::iterator RE = Resources.end(); 836 for (unsigned C = 0; C < NumCycles; ++C) 837 while (RI != RE) { 838 if ((*RI++)->canReserveResources(*MI)) { 839 ++ReservedCycles; 840 break; 841 } 842 } 843 // Start reserving resources using existing DFAs. 844 for (unsigned C = 0; C < ReservedCycles; ++C) { 845 --RI; 846 (*RI)->reserveResources(*MI); 847 } 848 // Add new DFAs, if needed, to reserve resources. 849 for (unsigned C = ReservedCycles; C < NumCycles; ++C) { 850 DFAPacketizer *NewResource = 851 TII->CreateTargetScheduleState(MF.getSubtarget()); 852 assert(NewResource->canReserveResources(*MI) && "Reserve error."); 853 NewResource->reserveResources(*MI); 854 Resources.push_back(NewResource); 855 } 856 } 857 int Resmii = Resources.size(); 858 // Delete the memory for each of the DFAs that were created earlier. 859 for (DFAPacketizer *RI : Resources) { 860 DFAPacketizer *D = RI; 861 delete D; 862 } 863 Resources.clear(); 864 return Resmii; 865 } 866 867 /// Calculate the recurrence-constrainted minimum initiation interval. 868 /// Iterate over each circuit. Compute the delay(c) and distance(c) 869 /// for each circuit. The II needs to satisfy the inequality 870 /// delay(c) - II*distance(c) <= 0. For each circuit, choose the smallest 871 /// II that satisfies the inequality, and the RecMII is the maximum 872 /// of those values. 873 unsigned SwingSchedulerDAG::calculateRecMII(NodeSetType &NodeSets) { 874 unsigned RecMII = 0; 875 876 for (NodeSet &Nodes : NodeSets) { 877 if (Nodes.empty()) 878 continue; 879 880 unsigned Delay = Nodes.getLatency(); 881 unsigned Distance = 1; 882 883 // ii = ceil(delay / distance) 884 unsigned CurMII = (Delay + Distance - 1) / Distance; 885 Nodes.setRecMII(CurMII); 886 if (CurMII > RecMII) 887 RecMII = CurMII; 888 } 889 890 return RecMII; 891 } 892 893 /// Swap all the anti dependences in the DAG. That means it is no longer a DAG, 894 /// but we do this to find the circuits, and then change them back. 895 static void swapAntiDependences(std::vector<SUnit> &SUnits) { 896 SmallVector<std::pair<SUnit *, SDep>, 8> DepsAdded; 897 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { 898 SUnit *SU = &SUnits[i]; 899 for (SUnit::pred_iterator IP = SU->Preds.begin(), EP = SU->Preds.end(); 900 IP != EP; ++IP) { 901 if (IP->getKind() != SDep::Anti) 902 continue; 903 DepsAdded.push_back(std::make_pair(SU, *IP)); 904 } 905 } 906 for (SmallVector<std::pair<SUnit *, SDep>, 8>::iterator I = DepsAdded.begin(), 907 E = DepsAdded.end(); 908 I != E; ++I) { 909 // Remove this anti dependency and add one in the reverse direction. 910 SUnit *SU = I->first; 911 SDep &D = I->second; 912 SUnit *TargetSU = D.getSUnit(); 913 unsigned Reg = D.getReg(); 914 unsigned Lat = D.getLatency(); 915 SU->removePred(D); 916 SDep Dep(SU, SDep::Anti, Reg); 917 Dep.setLatency(Lat); 918 TargetSU->addPred(Dep); 919 } 920 } 921 922 /// Create the adjacency structure of the nodes in the graph. 923 void SwingSchedulerDAG::Circuits::createAdjacencyStructure( 924 SwingSchedulerDAG *DAG) { 925 BitVector Added(SUnits.size()); 926 DenseMap<int, int> OutputDeps; 927 for (int i = 0, e = SUnits.size(); i != e; ++i) { 928 Added.reset(); 929 // Add any successor to the adjacency matrix and exclude duplicates. 930 for (auto &SI : SUnits[i].Succs) { 931 // Only create a back-edge on the first and last nodes of a dependence 932 // chain. 
This records any chains and adds them later. 933 if (SI.getKind() == SDep::Output) { 934 int N = SI.getSUnit()->NodeNum; 935 int BackEdge = i; 936 auto Dep = OutputDeps.find(BackEdge); 937 if (Dep != OutputDeps.end()) { 938 BackEdge = Dep->second; 939 OutputDeps.erase(Dep); 940 } 941 OutputDeps[N] = BackEdge; 942 } 943 // Do not process a boundary node, an artificial node. 944 // A back-edge is processed only if it goes to a Phi. 945 if (SI.getSUnit()->isBoundaryNode() || SI.isArtificial() || 946 (SI.getKind() == SDep::Anti && !SI.getSUnit()->getInstr()->isPHI())) 947 continue; 948 int N = SI.getSUnit()->NodeNum; 949 if (!Added.test(N)) { 950 AdjK[i].push_back(N); 951 Added.set(N); 952 } 953 } 954 // A chain edge between a store and a load is treated as a back-edge in the 955 // adjacency matrix. 956 for (auto &PI : SUnits[i].Preds) { 957 if (!SUnits[i].getInstr()->mayStore() || 958 !DAG->isLoopCarriedDep(&SUnits[i], PI, false)) 959 continue; 960 if (PI.getKind() == SDep::Order && PI.getSUnit()->getInstr()->mayLoad()) { 961 int N = PI.getSUnit()->NodeNum; 962 if (!Added.test(N)) { 963 AdjK[i].push_back(N); 964 Added.set(N); 965 } 966 } 967 } 968 } 969 // Add back-edges in the adjacency matrix for the output dependences. 970 for (auto &OD : OutputDeps) 971 if (!Added.test(OD.second)) { 972 AdjK[OD.first].push_back(OD.second); 973 Added.set(OD.second); 974 } 975 } 976 977 /// Identify an elementary circuit in the dependence graph starting at the 978 /// specified node. 979 bool SwingSchedulerDAG::Circuits::circuit(int V, int S, NodeSetType &NodeSets, 980 bool HasBackedge) { 981 SUnit *SV = &SUnits[V]; 982 bool F = false; 983 Stack.insert(SV); 984 Blocked.set(V); 985 986 for (auto W : AdjK[V]) { 987 if (NumPaths > MaxPaths) 988 break; 989 if (W < S) 990 continue; 991 if (W == S) { 992 if (!HasBackedge) 993 NodeSets.push_back(NodeSet(Stack.begin(), Stack.end())); 994 F = true; 995 ++NumPaths; 996 break; 997 } else if (!Blocked.test(W)) { 998 if (circuit(W, S, NodeSets, 999 Node2Idx->at(W) < Node2Idx->at(V) ? true : HasBackedge)) 1000 F = true; 1001 } 1002 } 1003 1004 if (F) 1005 unblock(V); 1006 else { 1007 for (auto W : AdjK[V]) { 1008 if (W < S) 1009 continue; 1010 if (B[W].count(SV) == 0) 1011 B[W].insert(SV); 1012 } 1013 } 1014 Stack.pop_back(); 1015 return F; 1016 } 1017 1018 /// Unblock a node in the circuit finding algorithm. 1019 void SwingSchedulerDAG::Circuits::unblock(int U) { 1020 Blocked.reset(U); 1021 SmallPtrSet<SUnit *, 4> &BU = B[U]; 1022 while (!BU.empty()) { 1023 SmallPtrSet<SUnit *, 4>::iterator SI = BU.begin(); 1024 assert(SI != BU.end() && "Invalid B set."); 1025 SUnit *W = *SI; 1026 BU.erase(W); 1027 if (Blocked.test(W->NodeNum)) 1028 unblock(W->NodeNum); 1029 } 1030 } 1031 1032 /// Identify all the elementary circuits in the dependence graph using 1033 /// Johnson's circuit algorithm. 1034 void SwingSchedulerDAG::findCircuits(NodeSetType &NodeSets) { 1035 // Swap all the anti dependences in the DAG. That means it is no longer a DAG, 1036 // but we do this to find the circuits, and then change them back. 1037 swapAntiDependences(SUnits); 1038 1039 Circuits Cir(SUnits, Topo); 1040 // Create the adjacency structure. 1041 Cir.createAdjacencyStructure(this); 1042 for (int i = 0, e = SUnits.size(); i != e; ++i) { 1043 Cir.reset(); 1044 Cir.circuit(i, i, NodeSets); 1045 } 1046 1047 // Change the dependences back so that we've created a DAG again. 
1048 swapAntiDependences(SUnits); 1049 } 1050 1051 // Create artificial dependencies between the source of COPY/REG_SEQUENCE that 1052 // is loop-carried to the USE in next iteration. This will help pipeliner avoid 1053 // additional copies that are needed across iterations. An artificial dependence 1054 // edge is added from USE to SOURCE of COPY/REG_SEQUENCE. 1055 1056 // PHI-------Anti-Dep-----> COPY/REG_SEQUENCE (loop-carried) 1057 // SRCOfCopY------True-Dep---> COPY/REG_SEQUENCE 1058 // PHI-------True-Dep------> USEOfPhi 1059 1060 // The mutation creates 1061 // USEOfPHI -------Artificial-Dep---> SRCOfCopy 1062 1063 // This overall will ensure, the USEOfPHI is scheduled before SRCOfCopy 1064 // (since USE is a predecessor), implies, the COPY/ REG_SEQUENCE is scheduled 1065 // late to avoid additional copies across iterations. The possible scheduling 1066 // order would be 1067 // USEOfPHI --- SRCOfCopy--- COPY/REG_SEQUENCE. 1068 1069 void SwingSchedulerDAG::CopyToPhiMutation::apply(ScheduleDAGInstrs *DAG) { 1070 for (SUnit &SU : DAG->SUnits) { 1071 // Find the COPY/REG_SEQUENCE instruction. 1072 if (!SU.getInstr()->isCopy() && !SU.getInstr()->isRegSequence()) 1073 continue; 1074 1075 // Record the loop carried PHIs. 1076 SmallVector<SUnit *, 4> PHISUs; 1077 // Record the SrcSUs that feed the COPY/REG_SEQUENCE instructions. 1078 SmallVector<SUnit *, 4> SrcSUs; 1079 1080 for (auto &Dep : SU.Preds) { 1081 SUnit *TmpSU = Dep.getSUnit(); 1082 MachineInstr *TmpMI = TmpSU->getInstr(); 1083 SDep::Kind DepKind = Dep.getKind(); 1084 // Save the loop carried PHI. 1085 if (DepKind == SDep::Anti && TmpMI->isPHI()) 1086 PHISUs.push_back(TmpSU); 1087 // Save the source of COPY/REG_SEQUENCE. 1088 // If the source has no pre-decessors, we will end up creating cycles. 1089 else if (DepKind == SDep::Data && !TmpMI->isPHI() && TmpSU->NumPreds > 0) 1090 SrcSUs.push_back(TmpSU); 1091 } 1092 1093 if (PHISUs.size() == 0 || SrcSUs.size() == 0) 1094 continue; 1095 1096 // Find the USEs of PHI. If the use is a PHI or REG_SEQUENCE, push back this 1097 // SUnit to the container. 1098 SmallVector<SUnit *, 8> UseSUs; 1099 for (auto I = PHISUs.begin(); I != PHISUs.end(); ++I) { 1100 for (auto &Dep : (*I)->Succs) { 1101 if (Dep.getKind() != SDep::Data) 1102 continue; 1103 1104 SUnit *TmpSU = Dep.getSUnit(); 1105 MachineInstr *TmpMI = TmpSU->getInstr(); 1106 if (TmpMI->isPHI() || TmpMI->isRegSequence()) { 1107 PHISUs.push_back(TmpSU); 1108 continue; 1109 } 1110 UseSUs.push_back(TmpSU); 1111 } 1112 } 1113 1114 if (UseSUs.size() == 0) 1115 continue; 1116 1117 SwingSchedulerDAG *SDAG = cast<SwingSchedulerDAG>(DAG); 1118 // Add the artificial dependencies if it does not form a cycle. 1119 for (auto I : UseSUs) { 1120 for (auto Src : SrcSUs) { 1121 if (!SDAG->Topo.IsReachable(I, Src) && Src != I) { 1122 Src->addPred(SDep(I, SDep::Artificial)); 1123 SDAG->Topo.AddPred(Src, I); 1124 } 1125 } 1126 } 1127 } 1128 } 1129 1130 /// Return true for DAG nodes that we ignore when computing the cost functions. 1131 /// We ignore the back-edge recurrence in order to avoid unbounded recursion 1132 /// in the calculation of the ASAP, ALAP, etc functions. 1133 static bool ignoreDependence(const SDep &D, bool isPred) { 1134 if (D.isArtificial()) 1135 return true; 1136 return D.getKind() == SDep::Anti && isPred; 1137 } 1138 1139 /// Compute several functions need to order the nodes for scheduling. 1140 /// ASAP - Earliest time to schedule a node. 1141 /// ALAP - Latest time to schedule a node. 
1142 /// MOV - Mobility function, difference between ALAP and ASAP. 1143 /// D - Depth of each node. 1144 /// H - Height of each node. 1145 void SwingSchedulerDAG::computeNodeFunctions(NodeSetType &NodeSets) { 1146 ScheduleInfo.resize(SUnits.size()); 1147 1148 LLVM_DEBUG({ 1149 for (ScheduleDAGTopologicalSort::const_iterator I = Topo.begin(), 1150 E = Topo.end(); 1151 I != E; ++I) { 1152 const SUnit &SU = SUnits[*I]; 1153 dumpNode(SU); 1154 } 1155 }); 1156 1157 int maxASAP = 0; 1158 // Compute ASAP and ZeroLatencyDepth. 1159 for (ScheduleDAGTopologicalSort::const_iterator I = Topo.begin(), 1160 E = Topo.end(); 1161 I != E; ++I) { 1162 int asap = 0; 1163 int zeroLatencyDepth = 0; 1164 SUnit *SU = &SUnits[*I]; 1165 for (SUnit::const_pred_iterator IP = SU->Preds.begin(), 1166 EP = SU->Preds.end(); 1167 IP != EP; ++IP) { 1168 SUnit *pred = IP->getSUnit(); 1169 if (IP->getLatency() == 0) 1170 zeroLatencyDepth = 1171 std::max(zeroLatencyDepth, getZeroLatencyDepth(pred) + 1); 1172 if (ignoreDependence(*IP, true)) 1173 continue; 1174 asap = std::max(asap, (int)(getASAP(pred) + IP->getLatency() - 1175 getDistance(pred, SU, *IP) * MII)); 1176 } 1177 maxASAP = std::max(maxASAP, asap); 1178 ScheduleInfo[*I].ASAP = asap; 1179 ScheduleInfo[*I].ZeroLatencyDepth = zeroLatencyDepth; 1180 } 1181 1182 // Compute ALAP, ZeroLatencyHeight, and MOV. 1183 for (ScheduleDAGTopologicalSort::const_reverse_iterator I = Topo.rbegin(), 1184 E = Topo.rend(); 1185 I != E; ++I) { 1186 int alap = maxASAP; 1187 int zeroLatencyHeight = 0; 1188 SUnit *SU = &SUnits[*I]; 1189 for (SUnit::const_succ_iterator IS = SU->Succs.begin(), 1190 ES = SU->Succs.end(); 1191 IS != ES; ++IS) { 1192 SUnit *succ = IS->getSUnit(); 1193 if (IS->getLatency() == 0) 1194 zeroLatencyHeight = 1195 std::max(zeroLatencyHeight, getZeroLatencyHeight(succ) + 1); 1196 if (ignoreDependence(*IS, true)) 1197 continue; 1198 alap = std::min(alap, (int)(getALAP(succ) - IS->getLatency() + 1199 getDistance(SU, succ, *IS) * MII)); 1200 } 1201 1202 ScheduleInfo[*I].ALAP = alap; 1203 ScheduleInfo[*I].ZeroLatencyHeight = zeroLatencyHeight; 1204 } 1205 1206 // After computing the node functions, compute the summary for each node set. 1207 for (NodeSet &I : NodeSets) 1208 I.computeNodeSetInfo(this); 1209 1210 LLVM_DEBUG({ 1211 for (unsigned i = 0; i < SUnits.size(); i++) { 1212 dbgs() << "\tNode " << i << ":\n"; 1213 dbgs() << "\t ASAP = " << getASAP(&SUnits[i]) << "\n"; 1214 dbgs() << "\t ALAP = " << getALAP(&SUnits[i]) << "\n"; 1215 dbgs() << "\t MOV = " << getMOV(&SUnits[i]) << "\n"; 1216 dbgs() << "\t D = " << getDepth(&SUnits[i]) << "\n"; 1217 dbgs() << "\t H = " << getHeight(&SUnits[i]) << "\n"; 1218 dbgs() << "\t ZLD = " << getZeroLatencyDepth(&SUnits[i]) << "\n"; 1219 dbgs() << "\t ZLH = " << getZeroLatencyHeight(&SUnits[i]) << "\n"; 1220 } 1221 }); 1222 } 1223 1224 /// Compute the Pred_L(O) set, as defined in the paper. The set is defined 1225 /// as the predecessors of the elements of NodeOrder that are not also in 1226 /// NodeOrder. 
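/// Anti-dependence successors are counted as predecessors as well, since those
/// edges represent loop-carried back-edges in the recurrence.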
1227 static bool pred_L(SetVector<SUnit *> &NodeOrder, 1228 SmallSetVector<SUnit *, 8> &Preds, 1229 const NodeSet *S = nullptr) { 1230 Preds.clear(); 1231 for (SetVector<SUnit *>::iterator I = NodeOrder.begin(), E = NodeOrder.end(); 1232 I != E; ++I) { 1233 for (SUnit::pred_iterator PI = (*I)->Preds.begin(), PE = (*I)->Preds.end(); 1234 PI != PE; ++PI) { 1235 if (S && S->count(PI->getSUnit()) == 0) 1236 continue; 1237 if (ignoreDependence(*PI, true)) 1238 continue; 1239 if (NodeOrder.count(PI->getSUnit()) == 0) 1240 Preds.insert(PI->getSUnit()); 1241 } 1242 // Back-edges are predecessors with an anti-dependence. 1243 for (SUnit::const_succ_iterator IS = (*I)->Succs.begin(), 1244 ES = (*I)->Succs.end(); 1245 IS != ES; ++IS) { 1246 if (IS->getKind() != SDep::Anti) 1247 continue; 1248 if (S && S->count(IS->getSUnit()) == 0) 1249 continue; 1250 if (NodeOrder.count(IS->getSUnit()) == 0) 1251 Preds.insert(IS->getSUnit()); 1252 } 1253 } 1254 return !Preds.empty(); 1255 } 1256 1257 /// Compute the Succ_L(O) set, as defined in the paper. The set is defined 1258 /// as the successors of the elements of NodeOrder that are not also in 1259 /// NodeOrder. 1260 static bool succ_L(SetVector<SUnit *> &NodeOrder, 1261 SmallSetVector<SUnit *, 8> &Succs, 1262 const NodeSet *S = nullptr) { 1263 Succs.clear(); 1264 for (SetVector<SUnit *>::iterator I = NodeOrder.begin(), E = NodeOrder.end(); 1265 I != E; ++I) { 1266 for (SUnit::succ_iterator SI = (*I)->Succs.begin(), SE = (*I)->Succs.end(); 1267 SI != SE; ++SI) { 1268 if (S && S->count(SI->getSUnit()) == 0) 1269 continue; 1270 if (ignoreDependence(*SI, false)) 1271 continue; 1272 if (NodeOrder.count(SI->getSUnit()) == 0) 1273 Succs.insert(SI->getSUnit()); 1274 } 1275 for (SUnit::const_pred_iterator PI = (*I)->Preds.begin(), 1276 PE = (*I)->Preds.end(); 1277 PI != PE; ++PI) { 1278 if (PI->getKind() != SDep::Anti) 1279 continue; 1280 if (S && S->count(PI->getSUnit()) == 0) 1281 continue; 1282 if (NodeOrder.count(PI->getSUnit()) == 0) 1283 Succs.insert(PI->getSUnit()); 1284 } 1285 } 1286 return !Succs.empty(); 1287 } 1288 1289 /// Return true if there is a path from the specified node to any of the nodes 1290 /// in DestNodes. Keep track and return the nodes in any path. 1291 static bool computePath(SUnit *Cur, SetVector<SUnit *> &Path, 1292 SetVector<SUnit *> &DestNodes, 1293 SetVector<SUnit *> &Exclude, 1294 SmallPtrSet<SUnit *, 8> &Visited) { 1295 if (Cur->isBoundaryNode()) 1296 return false; 1297 if (Exclude.count(Cur) != 0) 1298 return false; 1299 if (DestNodes.count(Cur) != 0) 1300 return true; 1301 if (!Visited.insert(Cur).second) 1302 return Path.count(Cur) != 0; 1303 bool FoundPath = false; 1304 for (auto &SI : Cur->Succs) 1305 FoundPath |= computePath(SI.getSUnit(), Path, DestNodes, Exclude, Visited); 1306 for (auto &PI : Cur->Preds) 1307 if (PI.getKind() == SDep::Anti) 1308 FoundPath |= 1309 computePath(PI.getSUnit(), Path, DestNodes, Exclude, Visited); 1310 if (FoundPath) 1311 Path.insert(Cur); 1312 return FoundPath; 1313 } 1314 1315 /// Return true if Set1 is a subset of Set2. 1316 template <class S1Ty, class S2Ty> static bool isSubset(S1Ty &Set1, S2Ty &Set2) { 1317 for (typename S1Ty::iterator I = Set1.begin(), E = Set1.end(); I != E; ++I) 1318 if (Set2.count(*I) == 0) 1319 return false; 1320 return true; 1321 } 1322 1323 /// Compute the live-out registers for the instructions in a node-set. 1324 /// The live-out registers are those that are defined in the node-set, 1325 /// but not used. Except for use operands of Phis. 
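/// The resulting live-outs seed the RegPressureTracker that
/// registerPressureFilter uses to detect node-sets with excess pressure.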
1326 static void computeLiveOuts(MachineFunction &MF, RegPressureTracker &RPTracker, 1327 NodeSet &NS) { 1328 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 1329 MachineRegisterInfo &MRI = MF.getRegInfo(); 1330 SmallVector<RegisterMaskPair, 8> LiveOutRegs; 1331 SmallSet<unsigned, 4> Uses; 1332 for (SUnit *SU : NS) { 1333 const MachineInstr *MI = SU->getInstr(); 1334 if (MI->isPHI()) 1335 continue; 1336 for (const MachineOperand &MO : MI->operands()) 1337 if (MO.isReg() && MO.isUse()) { 1338 unsigned Reg = MO.getReg(); 1339 if (TargetRegisterInfo::isVirtualRegister(Reg)) 1340 Uses.insert(Reg); 1341 else if (MRI.isAllocatable(Reg)) 1342 for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) 1343 Uses.insert(*Units); 1344 } 1345 } 1346 for (SUnit *SU : NS) 1347 for (const MachineOperand &MO : SU->getInstr()->operands()) 1348 if (MO.isReg() && MO.isDef() && !MO.isDead()) { 1349 unsigned Reg = MO.getReg(); 1350 if (TargetRegisterInfo::isVirtualRegister(Reg)) { 1351 if (!Uses.count(Reg)) 1352 LiveOutRegs.push_back(RegisterMaskPair(Reg, 1353 LaneBitmask::getNone())); 1354 } else if (MRI.isAllocatable(Reg)) { 1355 for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) 1356 if (!Uses.count(*Units)) 1357 LiveOutRegs.push_back(RegisterMaskPair(*Units, 1358 LaneBitmask::getNone())); 1359 } 1360 } 1361 RPTracker.addLiveRegs(LiveOutRegs); 1362 } 1363 1364 /// A heuristic to filter nodes in recurrent node-sets if the register 1365 /// pressure of a set is too high. 1366 void SwingSchedulerDAG::registerPressureFilter(NodeSetType &NodeSets) { 1367 for (auto &NS : NodeSets) { 1368 // Skip small node-sets since they won't cause register pressure problems. 1369 if (NS.size() <= 2) 1370 continue; 1371 IntervalPressure RecRegPressure; 1372 RegPressureTracker RecRPTracker(RecRegPressure); 1373 RecRPTracker.init(&MF, &RegClassInfo, &LIS, BB, BB->end(), false, true); 1374 computeLiveOuts(MF, RecRPTracker, NS); 1375 RecRPTracker.closeBottom(); 1376 1377 std::vector<SUnit *> SUnits(NS.begin(), NS.end()); 1378 llvm::sort(SUnits, [](const SUnit *A, const SUnit *B) { 1379 return A->NodeNum > B->NodeNum; 1380 }); 1381 1382 for (auto &SU : SUnits) { 1383 // Since we're computing the register pressure for a subset of the 1384 // instructions in a block, we need to set the tracker for each 1385 // instruction in the node-set. The tracker is set to the instruction 1386 // just after the one we're interested in. 1387 MachineBasicBlock::const_iterator CurInstI = SU->getInstr(); 1388 RecRPTracker.setPos(std::next(CurInstI)); 1389 1390 RegPressureDelta RPDelta; 1391 ArrayRef<PressureChange> CriticalPSets; 1392 RecRPTracker.getMaxUpwardPressureDelta(SU->getInstr(), nullptr, RPDelta, 1393 CriticalPSets, 1394 RecRegPressure.MaxSetPressure); 1395 if (RPDelta.Excess.isValid()) { 1396 LLVM_DEBUG( 1397 dbgs() << "Excess register pressure: SU(" << SU->NodeNum << ") " 1398 << TRI->getRegPressureSetName(RPDelta.Excess.getPSet()) 1399 << ":" << RPDelta.Excess.getUnitInc()); 1400 NS.setExceedPressure(SU); 1401 break; 1402 } 1403 RecRPTracker.recede(); 1404 } 1405 } 1406 } 1407 1408 /// A heuristic to colocate node sets that have the same set of 1409 /// successors. 
1410 void SwingSchedulerDAG::colocateNodeSets(NodeSetType &NodeSets) { 1411 unsigned Colocate = 0; 1412 for (int i = 0, e = NodeSets.size(); i < e; ++i) { 1413 NodeSet &N1 = NodeSets[i]; 1414 SmallSetVector<SUnit *, 8> S1; 1415 if (N1.empty() || !succ_L(N1, S1)) 1416 continue; 1417 for (int j = i + 1; j < e; ++j) { 1418 NodeSet &N2 = NodeSets[j]; 1419 if (N1.compareRecMII(N2) != 0) 1420 continue; 1421 SmallSetVector<SUnit *, 8> S2; 1422 if (N2.empty() || !succ_L(N2, S2)) 1423 continue; 1424 if (isSubset(S1, S2) && S1.size() == S2.size()) { 1425 N1.setColocate(++Colocate); 1426 N2.setColocate(Colocate); 1427 break; 1428 } 1429 } 1430 } 1431 } 1432 1433 /// Check if the existing node-sets are profitable. If not, then ignore the 1434 /// recurrent node-sets, and attempt to schedule all nodes together. This is 1435 /// a heuristic. If the MII is large and all the recurrent node-sets are small, 1436 /// then it's best to try to schedule all instructions together instead of 1437 /// starting with the recurrent node-sets. 1438 void SwingSchedulerDAG::checkNodeSets(NodeSetType &NodeSets) { 1439 // Look for loops with a large MII. 1440 if (MII < 17) 1441 return; 1442 // Check if the node-set contains only a simple add recurrence. 1443 for (auto &NS : NodeSets) { 1444 if (NS.getRecMII() > 2) 1445 return; 1446 if (NS.getMaxDepth() > MII) 1447 return; 1448 } 1449 NodeSets.clear(); 1450 LLVM_DEBUG(dbgs() << "Clear recurrence node-sets\n"); 1451 return; 1452 } 1453 1454 /// Add the nodes that do not belong to a recurrence set into groups 1455 /// based upon connected componenets. 1456 void SwingSchedulerDAG::groupRemainingNodes(NodeSetType &NodeSets) { 1457 SetVector<SUnit *> NodesAdded; 1458 SmallPtrSet<SUnit *, 8> Visited; 1459 // Add the nodes that are on a path between the previous node sets and 1460 // the current node set. 1461 for (NodeSet &I : NodeSets) { 1462 SmallSetVector<SUnit *, 8> N; 1463 // Add the nodes from the current node set to the previous node set. 1464 if (succ_L(I, N)) { 1465 SetVector<SUnit *> Path; 1466 for (SUnit *NI : N) { 1467 Visited.clear(); 1468 computePath(NI, Path, NodesAdded, I, Visited); 1469 } 1470 if (!Path.empty()) 1471 I.insert(Path.begin(), Path.end()); 1472 } 1473 // Add the nodes from the previous node set to the current node set. 1474 N.clear(); 1475 if (succ_L(NodesAdded, N)) { 1476 SetVector<SUnit *> Path; 1477 for (SUnit *NI : N) { 1478 Visited.clear(); 1479 computePath(NI, Path, I, NodesAdded, Visited); 1480 } 1481 if (!Path.empty()) 1482 I.insert(Path.begin(), Path.end()); 1483 } 1484 NodesAdded.insert(I.begin(), I.end()); 1485 } 1486 1487 // Create a new node set with the connected nodes of any successor of a node 1488 // in a recurrent set. 1489 NodeSet NewSet; 1490 SmallSetVector<SUnit *, 8> N; 1491 if (succ_L(NodesAdded, N)) 1492 for (SUnit *I : N) 1493 addConnectedNodes(I, NewSet, NodesAdded); 1494 if (!NewSet.empty()) 1495 NodeSets.push_back(NewSet); 1496 1497 // Create a new node set with the connected nodes of any predecessor of a node 1498 // in a recurrent set. 1499 NewSet.clear(); 1500 if (pred_L(NodesAdded, N)) 1501 for (SUnit *I : N) 1502 addConnectedNodes(I, NewSet, NodesAdded); 1503 if (!NewSet.empty()) 1504 NodeSets.push_back(NewSet); 1505 1506 // Create new nodes sets with the connected nodes any remaining node that 1507 // has no predecessor. 
1508 for (unsigned i = 0; i < SUnits.size(); ++i) { 1509 SUnit *SU = &SUnits[i]; 1510 if (NodesAdded.count(SU) == 0) { 1511 NewSet.clear(); 1512 addConnectedNodes(SU, NewSet, NodesAdded); 1513 if (!NewSet.empty()) 1514 NodeSets.push_back(NewSet); 1515 } 1516 } 1517 } 1518 1519 /// Add the node to the set, and add all is its connected nodes to the set. 1520 void SwingSchedulerDAG::addConnectedNodes(SUnit *SU, NodeSet &NewSet, 1521 SetVector<SUnit *> &NodesAdded) { 1522 NewSet.insert(SU); 1523 NodesAdded.insert(SU); 1524 for (auto &SI : SU->Succs) { 1525 SUnit *Successor = SI.getSUnit(); 1526 if (!SI.isArtificial() && NodesAdded.count(Successor) == 0) 1527 addConnectedNodes(Successor, NewSet, NodesAdded); 1528 } 1529 for (auto &PI : SU->Preds) { 1530 SUnit *Predecessor = PI.getSUnit(); 1531 if (!PI.isArtificial() && NodesAdded.count(Predecessor) == 0) 1532 addConnectedNodes(Predecessor, NewSet, NodesAdded); 1533 } 1534 } 1535 1536 /// Return true if Set1 contains elements in Set2. The elements in common 1537 /// are returned in a different container. 1538 static bool isIntersect(SmallSetVector<SUnit *, 8> &Set1, const NodeSet &Set2, 1539 SmallSetVector<SUnit *, 8> &Result) { 1540 Result.clear(); 1541 for (unsigned i = 0, e = Set1.size(); i != e; ++i) { 1542 SUnit *SU = Set1[i]; 1543 if (Set2.count(SU) != 0) 1544 Result.insert(SU); 1545 } 1546 return !Result.empty(); 1547 } 1548 1549 /// Merge the recurrence node sets that have the same initial node. 1550 void SwingSchedulerDAG::fuseRecs(NodeSetType &NodeSets) { 1551 for (NodeSetType::iterator I = NodeSets.begin(), E = NodeSets.end(); I != E; 1552 ++I) { 1553 NodeSet &NI = *I; 1554 for (NodeSetType::iterator J = I + 1; J != E;) { 1555 NodeSet &NJ = *J; 1556 if (NI.getNode(0)->NodeNum == NJ.getNode(0)->NodeNum) { 1557 if (NJ.compareRecMII(NI) > 0) 1558 NI.setRecMII(NJ.getRecMII()); 1559 for (NodeSet::iterator NII = J->begin(), ENI = J->end(); NII != ENI; 1560 ++NII) 1561 I->insert(*NII); 1562 NodeSets.erase(J); 1563 E = NodeSets.end(); 1564 } else { 1565 ++J; 1566 } 1567 } 1568 } 1569 } 1570 1571 /// Remove nodes that have been scheduled in previous NodeSets. 1572 void SwingSchedulerDAG::removeDuplicateNodes(NodeSetType &NodeSets) { 1573 for (NodeSetType::iterator I = NodeSets.begin(), E = NodeSets.end(); I != E; 1574 ++I) 1575 for (NodeSetType::iterator J = I + 1; J != E;) { 1576 J->remove_if([&](SUnit *SUJ) { return I->count(SUJ); }); 1577 1578 if (J->empty()) { 1579 NodeSets.erase(J); 1580 E = NodeSets.end(); 1581 } else { 1582 ++J; 1583 } 1584 } 1585 } 1586 1587 /// Compute an ordered list of the dependence graph nodes, which 1588 /// indicates the order that the nodes will be scheduled. This is a 1589 /// two-level algorithm. First, a partial order is created, which 1590 /// consists of a list of sets ordered from highest to lowest priority. 
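/// Second, the nodes within each set are visited alternately top-down (by
/// maximum height) and bottom-up (by maximum depth), switching direction when
/// the current work list is exhausted or excess register pressure forces a
/// switch.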
1591 void SwingSchedulerDAG::computeNodeOrder(NodeSetType &NodeSets) { 1592 SmallSetVector<SUnit *, 8> R; 1593 NodeOrder.clear(); 1594 1595 for (auto &Nodes : NodeSets) { 1596 LLVM_DEBUG(dbgs() << "NodeSet size " << Nodes.size() << "\n"); 1597 OrderKind Order; 1598 SmallSetVector<SUnit *, 8> N; 1599 if (pred_L(NodeOrder, N) && isSubset(N, Nodes)) { 1600 R.insert(N.begin(), N.end()); 1601 Order = BottomUp; 1602 LLVM_DEBUG(dbgs() << " Bottom up (preds) "); 1603 } else if (succ_L(NodeOrder, N) && isSubset(N, Nodes)) { 1604 R.insert(N.begin(), N.end()); 1605 Order = TopDown; 1606 LLVM_DEBUG(dbgs() << " Top down (succs) "); 1607 } else if (isIntersect(N, Nodes, R)) { 1608 // If some of the successors are in the existing node-set, then use the 1609 // top-down ordering. 1610 Order = TopDown; 1611 LLVM_DEBUG(dbgs() << " Top down (intersect) "); 1612 } else if (NodeSets.size() == 1) { 1613 for (auto &N : Nodes) 1614 if (N->Succs.size() == 0) 1615 R.insert(N); 1616 Order = BottomUp; 1617 LLVM_DEBUG(dbgs() << " Bottom up (all) "); 1618 } else { 1619 // Find the node with the highest ASAP. 1620 SUnit *maxASAP = nullptr; 1621 for (SUnit *SU : Nodes) { 1622 if (maxASAP == nullptr || getASAP(SU) > getASAP(maxASAP) || 1623 (getASAP(SU) == getASAP(maxASAP) && SU->NodeNum > maxASAP->NodeNum)) 1624 maxASAP = SU; 1625 } 1626 R.insert(maxASAP); 1627 Order = BottomUp; 1628 LLVM_DEBUG(dbgs() << " Bottom up (default) "); 1629 } 1630 1631 while (!R.empty()) { 1632 if (Order == TopDown) { 1633 // Choose the node with the maximum height. If more than one, choose 1634 // the node wiTH the maximum ZeroLatencyHeight. If still more than one, 1635 // choose the node with the lowest MOV. 1636 while (!R.empty()) { 1637 SUnit *maxHeight = nullptr; 1638 for (SUnit *I : R) { 1639 if (maxHeight == nullptr || getHeight(I) > getHeight(maxHeight)) 1640 maxHeight = I; 1641 else if (getHeight(I) == getHeight(maxHeight) && 1642 getZeroLatencyHeight(I) > getZeroLatencyHeight(maxHeight)) 1643 maxHeight = I; 1644 else if (getHeight(I) == getHeight(maxHeight) && 1645 getZeroLatencyHeight(I) == 1646 getZeroLatencyHeight(maxHeight) && 1647 getMOV(I) < getMOV(maxHeight)) 1648 maxHeight = I; 1649 } 1650 NodeOrder.insert(maxHeight); 1651 LLVM_DEBUG(dbgs() << maxHeight->NodeNum << " "); 1652 R.remove(maxHeight); 1653 for (const auto &I : maxHeight->Succs) { 1654 if (Nodes.count(I.getSUnit()) == 0) 1655 continue; 1656 if (NodeOrder.count(I.getSUnit()) != 0) 1657 continue; 1658 if (ignoreDependence(I, false)) 1659 continue; 1660 R.insert(I.getSUnit()); 1661 } 1662 // Back-edges are predecessors with an anti-dependence. 1663 for (const auto &I : maxHeight->Preds) { 1664 if (I.getKind() != SDep::Anti) 1665 continue; 1666 if (Nodes.count(I.getSUnit()) == 0) 1667 continue; 1668 if (NodeOrder.count(I.getSUnit()) != 0) 1669 continue; 1670 R.insert(I.getSUnit()); 1671 } 1672 } 1673 Order = BottomUp; 1674 LLVM_DEBUG(dbgs() << "\n Switching order to bottom up "); 1675 SmallSetVector<SUnit *, 8> N; 1676 if (pred_L(NodeOrder, N, &Nodes)) 1677 R.insert(N.begin(), N.end()); 1678 } else { 1679 // Choose the node with the maximum depth. If more than one, choose 1680 // the node with the maximum ZeroLatencyDepth. If still more than one, 1681 // choose the node with the lowest MOV. 
1682 while (!R.empty()) { 1683 SUnit *maxDepth = nullptr; 1684 for (SUnit *I : R) { 1685 if (maxDepth == nullptr || getDepth(I) > getDepth(maxDepth)) 1686 maxDepth = I; 1687 else if (getDepth(I) == getDepth(maxDepth) && 1688 getZeroLatencyDepth(I) > getZeroLatencyDepth(maxDepth)) 1689 maxDepth = I; 1690 else if (getDepth(I) == getDepth(maxDepth) && 1691 getZeroLatencyDepth(I) == getZeroLatencyDepth(maxDepth) && 1692 getMOV(I) < getMOV(maxDepth)) 1693 maxDepth = I; 1694 } 1695 NodeOrder.insert(maxDepth); 1696 LLVM_DEBUG(dbgs() << maxDepth->NodeNum << " "); 1697 R.remove(maxDepth); 1698 if (Nodes.isExceedSU(maxDepth)) { 1699 Order = TopDown; 1700 R.clear(); 1701 R.insert(Nodes.getNode(0)); 1702 break; 1703 } 1704 for (const auto &I : maxDepth->Preds) { 1705 if (Nodes.count(I.getSUnit()) == 0) 1706 continue; 1707 if (NodeOrder.count(I.getSUnit()) != 0) 1708 continue; 1709 R.insert(I.getSUnit()); 1710 } 1711 // Back-edges are predecessors with an anti-dependence. 1712 for (const auto &I : maxDepth->Succs) { 1713 if (I.getKind() != SDep::Anti) 1714 continue; 1715 if (Nodes.count(I.getSUnit()) == 0) 1716 continue; 1717 if (NodeOrder.count(I.getSUnit()) != 0) 1718 continue; 1719 R.insert(I.getSUnit()); 1720 } 1721 } 1722 Order = TopDown; 1723 LLVM_DEBUG(dbgs() << "\n Switching order to top down "); 1724 SmallSetVector<SUnit *, 8> N; 1725 if (succ_L(NodeOrder, N, &Nodes)) 1726 R.insert(N.begin(), N.end()); 1727 } 1728 } 1729 LLVM_DEBUG(dbgs() << "\nDone with Nodeset\n"); 1730 } 1731 1732 LLVM_DEBUG({ 1733 dbgs() << "Node order: "; 1734 for (SUnit *I : NodeOrder) 1735 dbgs() << " " << I->NodeNum << " "; 1736 dbgs() << "\n"; 1737 }); 1738 } 1739 1740 /// Process the nodes in the computed order and create the pipelined schedule 1741 /// of the instructions, if possible. Return true if a schedule is found. 1742 bool SwingSchedulerDAG::schedulePipeline(SMSchedule &Schedule) { 1743 if (NodeOrder.empty()) 1744 return false; 1745 1746 bool scheduleFound = false; 1747 // Keep increasing II until a valid schedule is found. 1748 for (unsigned II = MII; II < MII + 10 && !scheduleFound; ++II) { 1749 Schedule.reset(); 1750 Schedule.setInitiationInterval(II); 1751 LLVM_DEBUG(dbgs() << "Try to schedule with " << II << "\n"); 1752 1753 SetVector<SUnit *>::iterator NI = NodeOrder.begin(); 1754 SetVector<SUnit *>::iterator NE = NodeOrder.end(); 1755 do { 1756 SUnit *SU = *NI; 1757 1758 // Compute the schedule time for the instruction, which is based 1759 // upon the scheduled time for any predecessors/successors. 1760 int EarlyStart = INT_MIN; 1761 int LateStart = INT_MAX; 1762 // These values are set when the size of the schedule window is limited 1763 // due to chain dependences. 
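      // Roughly, the window searched below is (an illustrative sketch, not an
      // exhaustive description of computeStart):
      //   only EarlyStart known: forward  over [EarlyStart, EarlyStart + II - 1]
      //   only LateStart known:  backward over [LateStart, LateStart - II + 1]
      //   both known:            the window is clamped to at most II cycles
      //   neither known:         fall back to FirstCycle + ASAP(SU)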
1764 int SchedEnd = INT_MAX; 1765 int SchedStart = INT_MIN; 1766 Schedule.computeStart(SU, &EarlyStart, &LateStart, &SchedEnd, &SchedStart, 1767 II, this); 1768 LLVM_DEBUG({ 1769 dbgs() << "Inst (" << SU->NodeNum << ") "; 1770 SU->getInstr()->dump(); 1771 dbgs() << "\n"; 1772 }); 1773 LLVM_DEBUG({ 1774 dbgs() << "\tes: " << EarlyStart << " ls: " << LateStart 1775 << " me: " << SchedEnd << " ms: " << SchedStart << "\n"; 1776 }); 1777 1778 if (EarlyStart > LateStart || SchedEnd < EarlyStart || 1779 SchedStart > LateStart) 1780 scheduleFound = false; 1781 else if (EarlyStart != INT_MIN && LateStart == INT_MAX) { 1782 SchedEnd = std::min(SchedEnd, EarlyStart + (int)II - 1); 1783 scheduleFound = Schedule.insert(SU, EarlyStart, SchedEnd, II); 1784 } else if (EarlyStart == INT_MIN && LateStart != INT_MAX) { 1785 SchedStart = std::max(SchedStart, LateStart - (int)II + 1); 1786 scheduleFound = Schedule.insert(SU, LateStart, SchedStart, II); 1787 } else if (EarlyStart != INT_MIN && LateStart != INT_MAX) { 1788 SchedEnd = 1789 std::min(SchedEnd, std::min(LateStart, EarlyStart + (int)II - 1)); 1790 // When scheduling a Phi it is better to start at the late cycle and go 1791 // backwards. The default order may insert the Phi too far away from 1792 // its first dependence. 1793 if (SU->getInstr()->isPHI()) 1794 scheduleFound = Schedule.insert(SU, SchedEnd, EarlyStart, II); 1795 else 1796 scheduleFound = Schedule.insert(SU, EarlyStart, SchedEnd, II); 1797 } else { 1798 int FirstCycle = Schedule.getFirstCycle(); 1799 scheduleFound = Schedule.insert(SU, FirstCycle + getASAP(SU), 1800 FirstCycle + getASAP(SU) + II - 1, II); 1801 } 1802 // Even if we find a schedule, make sure the schedule doesn't exceed the 1803 // allowable number of stages. We keep trying if this happens. 1804 if (scheduleFound) 1805 if (SwpMaxStages > -1 && 1806 Schedule.getMaxStageCount() > (unsigned)SwpMaxStages) 1807 scheduleFound = false; 1808 1809 LLVM_DEBUG({ 1810 if (!scheduleFound) 1811 dbgs() << "\tCan't schedule\n"; 1812 }); 1813 } while (++NI != NE && scheduleFound); 1814 1815 // If a schedule is found, check if it is a valid schedule too. 1816 if (scheduleFound) 1817 scheduleFound = Schedule.isValidSchedule(this); 1818 } 1819 1820 LLVM_DEBUG(dbgs() << "Schedule Found? " << scheduleFound << "\n"); 1821 1822 if (scheduleFound) 1823 Schedule.finalizeSchedule(this); 1824 else 1825 Schedule.reset(); 1826 1827 return scheduleFound && Schedule.getMaxStageCount() > 0; 1828 } 1829 1830 /// Given a schedule for the loop, generate a new version of the loop, 1831 /// and replace the old version. This function generates a prolog 1832 /// that contains the initial iterations in the pipeline, and kernel 1833 /// loop, and the epilogue that contains the code for the final 1834 /// iterations. 1835 void SwingSchedulerDAG::generatePipelinedLoop(SMSchedule &Schedule) { 1836 // Create a new basic block for the kernel and add it to the CFG. 1837 MachineBasicBlock *KernelBB = MF.CreateMachineBasicBlock(BB->getBasicBlock()); 1838 1839 unsigned MaxStageCount = Schedule.getMaxStageCount(); 1840 1841 // Remember the registers that are used in different stages. The index is 1842 // the iteration, or stage, that the instruction is scheduled in. This is 1843 // a map between register names in the original block and the names created 1844 // in each stage of the pipelined loop. 
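  // For example (schematic, with %v standing for any virtual register defined
  // in the original loop): VRMap[0][%v] is the name %v receives in the first
  // prolog block, VRMap[MaxStageCount][%v] the name it receives in the kernel,
  // and higher indices the names created in the epilog blocks.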
1845 ValueMapTy *VRMap = new ValueMapTy[(MaxStageCount + 1) * 2]; 1846 InstrMapTy InstrMap; 1847 1848 SmallVector<MachineBasicBlock *, 4> PrologBBs; 1849 // Generate the prolog instructions that set up the pipeline. 1850 generateProlog(Schedule, MaxStageCount, KernelBB, VRMap, PrologBBs); 1851 MF.insert(BB->getIterator(), KernelBB); 1852 1853 // Rearrange the instructions to generate the new, pipelined loop, 1854 // and update register names as needed. 1855 for (int Cycle = Schedule.getFirstCycle(), 1856 LastCycle = Schedule.getFinalCycle(); 1857 Cycle <= LastCycle; ++Cycle) { 1858 std::deque<SUnit *> &CycleInstrs = Schedule.getInstructions(Cycle); 1859 // This inner loop schedules each instruction in the cycle. 1860 for (SUnit *CI : CycleInstrs) { 1861 if (CI->getInstr()->isPHI()) 1862 continue; 1863 unsigned StageNum = Schedule.stageScheduled(getSUnit(CI->getInstr())); 1864 MachineInstr *NewMI = cloneInstr(CI->getInstr(), MaxStageCount, StageNum); 1865 updateInstruction(NewMI, false, MaxStageCount, StageNum, Schedule, VRMap); 1866 KernelBB->push_back(NewMI); 1867 InstrMap[NewMI] = CI->getInstr(); 1868 } 1869 } 1870 1871 // Copy any terminator instructions to the new kernel, and update 1872 // names as needed. 1873 for (MachineBasicBlock::iterator I = BB->getFirstTerminator(), 1874 E = BB->instr_end(); 1875 I != E; ++I) { 1876 MachineInstr *NewMI = MF.CloneMachineInstr(&*I); 1877 updateInstruction(NewMI, false, MaxStageCount, 0, Schedule, VRMap); 1878 KernelBB->push_back(NewMI); 1879 InstrMap[NewMI] = &*I; 1880 } 1881 1882 KernelBB->transferSuccessors(BB); 1883 KernelBB->replaceSuccessor(BB, KernelBB); 1884 1885 generateExistingPhis(KernelBB, PrologBBs.back(), KernelBB, KernelBB, Schedule, 1886 VRMap, InstrMap, MaxStageCount, MaxStageCount, false); 1887 generatePhis(KernelBB, PrologBBs.back(), KernelBB, KernelBB, Schedule, VRMap, 1888 InstrMap, MaxStageCount, MaxStageCount, false); 1889 1890 LLVM_DEBUG(dbgs() << "New block\n"; KernelBB->dump();); 1891 1892 SmallVector<MachineBasicBlock *, 4> EpilogBBs; 1893 // Generate the epilog instructions to complete the pipeline. 1894 generateEpilog(Schedule, MaxStageCount, KernelBB, VRMap, EpilogBBs, 1895 PrologBBs); 1896 1897 // We need this step because the register allocation doesn't handle some 1898 // situations well, so we insert copies to help out. 1899 splitLifetimes(KernelBB, EpilogBBs, Schedule); 1900 1901 // Remove dead instructions due to loop induction variables. 1902 removeDeadInstructions(KernelBB, EpilogBBs); 1903 1904 // Add branches between prolog and epilog blocks. 1905 addBranches(PrologBBs, KernelBB, EpilogBBs, Schedule, VRMap); 1906 1907 // Remove the original loop since it's no longer referenced. 1908 for (auto &I : *BB) 1909 LIS.RemoveMachineInstrFromMaps(I); 1910 BB->clear(); 1911 BB->eraseFromParent(); 1912 1913 delete[] VRMap; 1914 } 1915 1916 /// Generate the pipeline prolog code. 1917 void SwingSchedulerDAG::generateProlog(SMSchedule &Schedule, unsigned LastStage, 1918 MachineBasicBlock *KernelBB, 1919 ValueMapTy *VRMap, 1920 MBBVectorTy &PrologBBs) { 1921 MachineBasicBlock *PreheaderBB = MLI->getLoopFor(BB)->getLoopPreheader(); 1922 assert(PreheaderBB != nullptr && 1923 "Need to add code to handle loops w/o preheader"); 1924 MachineBasicBlock *PredBB = PreheaderBB; 1925 InstrMapTy InstrMap; 1926 1927 // Generate a basic block for each stage, not including the last stage, 1928 // which will be generated in the kernel. Each basic block may contain 1929 // instructions from multiple stages/iterations. 
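  // Schematically, for a schedule with LastStage == 2 (illustrative only):
  //   prolog 0: stage 0 of iteration 0
  //   prolog 1: stage 1 of iteration 0, stage 0 of iteration 1
  // and the kernel then executes stages 2/1/0 of three consecutive iterations.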
1930 for (unsigned i = 0; i < LastStage; ++i) { 1931 // Create and insert the prolog basic block prior to the original loop 1932 // basic block. The original loop is removed later. 1933 MachineBasicBlock *NewBB = MF.CreateMachineBasicBlock(BB->getBasicBlock()); 1934 PrologBBs.push_back(NewBB); 1935 MF.insert(BB->getIterator(), NewBB); 1936 NewBB->transferSuccessors(PredBB); 1937 PredBB->addSuccessor(NewBB); 1938 PredBB = NewBB; 1939 1940 // Generate instructions for each appropriate stage. Process instructions 1941 // in original program order. 1942 for (int StageNum = i; StageNum >= 0; --StageNum) { 1943 for (MachineBasicBlock::iterator BBI = BB->instr_begin(), 1944 BBE = BB->getFirstTerminator(); 1945 BBI != BBE; ++BBI) { 1946 if (Schedule.isScheduledAtStage(getSUnit(&*BBI), (unsigned)StageNum)) { 1947 if (BBI->isPHI()) 1948 continue; 1949 MachineInstr *NewMI = 1950 cloneAndChangeInstr(&*BBI, i, (unsigned)StageNum, Schedule); 1951 updateInstruction(NewMI, false, i, (unsigned)StageNum, Schedule, 1952 VRMap); 1953 NewBB->push_back(NewMI); 1954 InstrMap[NewMI] = &*BBI; 1955 } 1956 } 1957 } 1958 rewritePhiValues(NewBB, i, Schedule, VRMap, InstrMap); 1959 LLVM_DEBUG({ 1960 dbgs() << "prolog:\n"; 1961 NewBB->dump(); 1962 }); 1963 } 1964 1965 PredBB->replaceSuccessor(BB, KernelBB); 1966 1967 // Check if we need to remove the branch from the preheader to the original 1968 // loop, and replace it with a branch to the new loop. 1969 unsigned numBranches = TII->removeBranch(*PreheaderBB); 1970 if (numBranches) { 1971 SmallVector<MachineOperand, 0> Cond; 1972 TII->insertBranch(*PreheaderBB, PrologBBs[0], nullptr, Cond, DebugLoc()); 1973 } 1974 } 1975 1976 /// Generate the pipeline epilog code. The epilog code finishes the iterations 1977 /// that were started in either the prolog or the kernel. We create a basic 1978 /// block for each stage that needs to complete. 1979 void SwingSchedulerDAG::generateEpilog(SMSchedule &Schedule, unsigned LastStage, 1980 MachineBasicBlock *KernelBB, 1981 ValueMapTy *VRMap, 1982 MBBVectorTy &EpilogBBs, 1983 MBBVectorTy &PrologBBs) { 1984 // We need to change the branch from the kernel to the first epilog block, so 1985 // this call to analyze branch uses the kernel rather than the original BB. 1986 MachineBasicBlock *TBB = nullptr, *FBB = nullptr; 1987 SmallVector<MachineOperand, 4> Cond; 1988 bool checkBranch = TII->analyzeBranch(*KernelBB, TBB, FBB, Cond); 1989 assert(!checkBranch && "generateEpilog must be able to analyze the branch"); 1990 if (checkBranch) 1991 return; 1992 1993 MachineBasicBlock::succ_iterator LoopExitI = KernelBB->succ_begin(); 1994 if (*LoopExitI == KernelBB) 1995 ++LoopExitI; 1996 assert(LoopExitI != KernelBB->succ_end() && "Expecting a successor"); 1997 MachineBasicBlock *LoopExitBB = *LoopExitI; 1998 1999 MachineBasicBlock *PredBB = KernelBB; 2000 MachineBasicBlock *EpilogStart = LoopExitBB; 2001 InstrMapTy InstrMap; 2002 2003 // Generate a basic block for each stage, not including the last stage, 2004 // which was generated for the kernel. Each basic block may contain 2005 // instructions from multiple stages/iterations. 
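  // Schematically, continuing the LastStage == 2 example from generateProlog
  // (illustrative only): the first epilog block runs stage 2 for the iteration
  // that had reached stage 1 in the kernel, and the second epilog block runs
  // stages 1 and 2 for the last iteration started in the kernel.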
2006 int EpilogStage = LastStage + 1; 2007 for (unsigned i = LastStage; i >= 1; --i, ++EpilogStage) { 2008 MachineBasicBlock *NewBB = MF.CreateMachineBasicBlock(); 2009 EpilogBBs.push_back(NewBB); 2010 MF.insert(BB->getIterator(), NewBB); 2011 2012 PredBB->replaceSuccessor(LoopExitBB, NewBB); 2013 NewBB->addSuccessor(LoopExitBB); 2014 2015 if (EpilogStart == LoopExitBB) 2016 EpilogStart = NewBB; 2017 2018 // Add instructions to the epilog depending on the current block. 2019 // Process instructions in original program order. 2020 for (unsigned StageNum = i; StageNum <= LastStage; ++StageNum) { 2021 for (auto &BBI : *BB) { 2022 if (BBI.isPHI()) 2023 continue; 2024 MachineInstr *In = &BBI; 2025 if (Schedule.isScheduledAtStage(getSUnit(In), StageNum)) { 2026 // Instructions with memoperands in the epilog are updated with 2027 // conservative values. 2028 MachineInstr *NewMI = cloneInstr(In, UINT_MAX, 0); 2029 updateInstruction(NewMI, i == 1, EpilogStage, 0, Schedule, VRMap); 2030 NewBB->push_back(NewMI); 2031 InstrMap[NewMI] = In; 2032 } 2033 } 2034 } 2035 generateExistingPhis(NewBB, PrologBBs[i - 1], PredBB, KernelBB, Schedule, 2036 VRMap, InstrMap, LastStage, EpilogStage, i == 1); 2037 generatePhis(NewBB, PrologBBs[i - 1], PredBB, KernelBB, Schedule, VRMap, 2038 InstrMap, LastStage, EpilogStage, i == 1); 2039 PredBB = NewBB; 2040 2041 LLVM_DEBUG({ 2042 dbgs() << "epilog:\n"; 2043 NewBB->dump(); 2044 }); 2045 } 2046 2047 // Fix any Phi nodes in the loop exit block. 2048 for (MachineInstr &MI : *LoopExitBB) { 2049 if (!MI.isPHI()) 2050 break; 2051 for (unsigned i = 2, e = MI.getNumOperands() + 1; i != e; i += 2) { 2052 MachineOperand &MO = MI.getOperand(i); 2053 if (MO.getMBB() == BB) 2054 MO.setMBB(PredBB); 2055 } 2056 } 2057 2058 // Create a branch to the new epilog from the kernel. 2059 // Remove the original branch and add a new branch to the epilog. 2060 TII->removeBranch(*KernelBB); 2061 TII->insertBranch(*KernelBB, KernelBB, EpilogStart, Cond, DebugLoc()); 2062 // Add a branch to the loop exit. 2063 if (EpilogBBs.size() > 0) { 2064 MachineBasicBlock *LastEpilogBB = EpilogBBs.back(); 2065 SmallVector<MachineOperand, 4> Cond1; 2066 TII->insertBranch(*LastEpilogBB, LoopExitBB, nullptr, Cond1, DebugLoc()); 2067 } 2068 } 2069 2070 /// Replace all uses of FromReg that appear outside the specified 2071 /// basic block with ToReg. 2072 static void replaceRegUsesAfterLoop(unsigned FromReg, unsigned ToReg, 2073 MachineBasicBlock *MBB, 2074 MachineRegisterInfo &MRI, 2075 LiveIntervals &LIS) { 2076 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(FromReg), 2077 E = MRI.use_end(); 2078 I != E;) { 2079 MachineOperand &O = *I; 2080 ++I; 2081 if (O.getParent()->getParent() != MBB) 2082 O.setReg(ToReg); 2083 } 2084 if (!LIS.hasInterval(ToReg)) 2085 LIS.createEmptyInterval(ToReg); 2086 } 2087 2088 /// Return true if the register has a use that occurs outside the 2089 /// specified loop. 2090 static bool hasUseAfterLoop(unsigned Reg, MachineBasicBlock *BB, 2091 MachineRegisterInfo &MRI) { 2092 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(Reg), 2093 E = MRI.use_end(); 2094 I != E; ++I) 2095 if (I->getParent()->getParent() != BB) 2096 return true; 2097 return false; 2098 } 2099 2100 /// Generate Phis for the specific block in the generated pipelined code. 2101 /// This function looks at the Phis from the original code to guide the 2102 /// creation of new Phis. 
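/// For example (schematic): an original Phi  v1 = phi(init, loopval)  gives
/// rise, in each prolog/epilog block that still needs its value, to a new Phi
/// whose incoming operands are looked up in VRMap for the stages feeding that
/// block; when no Phi is needed anymore, the uses are simply renamed.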
2103 void SwingSchedulerDAG::generateExistingPhis( 2104 MachineBasicBlock *NewBB, MachineBasicBlock *BB1, MachineBasicBlock *BB2, 2105 MachineBasicBlock *KernelBB, SMSchedule &Schedule, ValueMapTy *VRMap, 2106 InstrMapTy &InstrMap, unsigned LastStageNum, unsigned CurStageNum, 2107 bool IsLast) { 2108 // Compute the stage number for the initial value of the Phi, which 2109 // comes from the prolog. The prolog to use depends on to which kernel/ 2110 // epilog that we're adding the Phi. 2111 unsigned PrologStage = 0; 2112 unsigned PrevStage = 0; 2113 bool InKernel = (LastStageNum == CurStageNum); 2114 if (InKernel) { 2115 PrologStage = LastStageNum - 1; 2116 PrevStage = CurStageNum; 2117 } else { 2118 PrologStage = LastStageNum - (CurStageNum - LastStageNum); 2119 PrevStage = LastStageNum + (CurStageNum - LastStageNum) - 1; 2120 } 2121 2122 for (MachineBasicBlock::iterator BBI = BB->instr_begin(), 2123 BBE = BB->getFirstNonPHI(); 2124 BBI != BBE; ++BBI) { 2125 unsigned Def = BBI->getOperand(0).getReg(); 2126 2127 unsigned InitVal = 0; 2128 unsigned LoopVal = 0; 2129 getPhiRegs(*BBI, BB, InitVal, LoopVal); 2130 2131 unsigned PhiOp1 = 0; 2132 // The Phi value from the loop body typically is defined in the loop, but 2133 // not always. So, we need to check if the value is defined in the loop. 2134 unsigned PhiOp2 = LoopVal; 2135 if (VRMap[LastStageNum].count(LoopVal)) 2136 PhiOp2 = VRMap[LastStageNum][LoopVal]; 2137 2138 int StageScheduled = Schedule.stageScheduled(getSUnit(&*BBI)); 2139 int LoopValStage = 2140 Schedule.stageScheduled(getSUnit(MRI.getVRegDef(LoopVal))); 2141 unsigned NumStages = Schedule.getStagesForReg(Def, CurStageNum); 2142 if (NumStages == 0) { 2143 // We don't need to generate a Phi anymore, but we need to rename any uses 2144 // of the Phi value. 2145 unsigned NewReg = VRMap[PrevStage][LoopVal]; 2146 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, 0, &*BBI, 2147 Def, InitVal, NewReg); 2148 if (VRMap[CurStageNum].count(LoopVal)) 2149 VRMap[CurStageNum][Def] = VRMap[CurStageNum][LoopVal]; 2150 } 2151 // Adjust the number of Phis needed depending on the number of prologs left, 2152 // and the distance from where the Phi is first scheduled. The number of 2153 // Phis cannot exceed the number of prolog stages. Each stage can 2154 // potentially define two values. 2155 unsigned MaxPhis = PrologStage + 2; 2156 if (!InKernel && (int)PrologStage <= LoopValStage) 2157 MaxPhis = std::max((int)MaxPhis - (int)LoopValStage, 1); 2158 unsigned NumPhis = std::min(NumStages, MaxPhis); 2159 2160 unsigned NewReg = 0; 2161 unsigned AccessStage = (LoopValStage != -1) ? LoopValStage : StageScheduled; 2162 // In the epilog, we may need to look back one stage to get the correct 2163 // Phi name because the epilog and prolog blocks execute the same stage. 2164 // The correct name is from the previous block only when the Phi has 2165 // been completely scheduled prior to the epilog, and Phi value is not 2166 // needed in multiple stages. 2167 int StageDiff = 0; 2168 if (!InKernel && StageScheduled >= LoopValStage && AccessStage == 0 && 2169 NumPhis == 1) 2170 StageDiff = 1; 2171 // Adjust the computations below when the phi and the loop definition 2172 // are scheduled in different stages. 2173 if (InKernel && LoopValStage != -1 && StageScheduled > LoopValStage) 2174 StageDiff = StageScheduled - LoopValStage; 2175 for (unsigned np = 0; np < NumPhis; ++np) { 2176 // If the Phi hasn't been scheduled, then use the initial Phi operand 2177 // value. 
Otherwise, use the scheduled version of the instruction. This 2178 // is a little complicated when a Phi references another Phi. 2179 if (np > PrologStage || StageScheduled >= (int)LastStageNum) 2180 PhiOp1 = InitVal; 2181 // Check if the Phi has already been scheduled in a prolog stage. 2182 else if (PrologStage >= AccessStage + StageDiff + np && 2183 VRMap[PrologStage - StageDiff - np].count(LoopVal) != 0) 2184 PhiOp1 = VRMap[PrologStage - StageDiff - np][LoopVal]; 2185 // Check if the Phi has already been scheduled, but the loop instruction 2186 // is either another Phi, or doesn't occur in the loop. 2187 else if (PrologStage >= AccessStage + StageDiff + np) { 2188 // If the Phi references another Phi, we need to examine the other 2189 // Phi to get the correct value. 2190 PhiOp1 = LoopVal; 2191 MachineInstr *InstOp1 = MRI.getVRegDef(PhiOp1); 2192 int Indirects = 1; 2193 while (InstOp1 && InstOp1->isPHI() && InstOp1->getParent() == BB) { 2194 int PhiStage = Schedule.stageScheduled(getSUnit(InstOp1)); 2195 if ((int)(PrologStage - StageDiff - np) < PhiStage + Indirects) 2196 PhiOp1 = getInitPhiReg(*InstOp1, BB); 2197 else 2198 PhiOp1 = getLoopPhiReg(*InstOp1, BB); 2199 InstOp1 = MRI.getVRegDef(PhiOp1); 2200 int PhiOpStage = Schedule.stageScheduled(getSUnit(InstOp1)); 2201 int StageAdj = (PhiOpStage != -1 ? PhiStage - PhiOpStage : 0); 2202 if (PhiOpStage != -1 && PrologStage - StageAdj >= Indirects + np && 2203 VRMap[PrologStage - StageAdj - Indirects - np].count(PhiOp1)) { 2204 PhiOp1 = VRMap[PrologStage - StageAdj - Indirects - np][PhiOp1]; 2205 break; 2206 } 2207 ++Indirects; 2208 } 2209 } else 2210 PhiOp1 = InitVal; 2211 // If this references a generated Phi in the kernel, get the Phi operand 2212 // from the incoming block. 2213 if (MachineInstr *InstOp1 = MRI.getVRegDef(PhiOp1)) 2214 if (InstOp1->isPHI() && InstOp1->getParent() == KernelBB) 2215 PhiOp1 = getInitPhiReg(*InstOp1, KernelBB); 2216 2217 MachineInstr *PhiInst = MRI.getVRegDef(LoopVal); 2218 bool LoopDefIsPhi = PhiInst && PhiInst->isPHI(); 2219 // In the epilog, a map lookup is needed to get the value from the kernel, 2220 // or previous epilog block. How this is done depends on whether the 2221 // instruction is scheduled in the previous block. 2222 if (!InKernel) { 2223 int StageDiffAdj = 0; 2224 if (LoopValStage != -1 && StageScheduled > LoopValStage) 2225 StageDiffAdj = StageScheduled - LoopValStage; 2226 // Use the loop value defined in the kernel, unless the kernel 2227 // contains the last definition of the Phi. 2228 if (np == 0 && PrevStage == LastStageNum && 2229 (StageScheduled != 0 || LoopValStage != 0) && 2230 VRMap[PrevStage - StageDiffAdj].count(LoopVal)) 2231 PhiOp2 = VRMap[PrevStage - StageDiffAdj][LoopVal]; 2232 // Use the value defined by the Phi. We add one because we switch 2233 // from looking at the loop value to the Phi definition. 2234 else if (np > 0 && PrevStage == LastStageNum && 2235 VRMap[PrevStage - np + 1].count(Def)) 2236 PhiOp2 = VRMap[PrevStage - np + 1][Def]; 2237 // Use the loop value defined in the kernel. 2238 else if (static_cast<unsigned>(LoopValStage) > PrologStage + 1 && 2239 VRMap[PrevStage - StageDiffAdj - np].count(LoopVal)) 2240 PhiOp2 = VRMap[PrevStage - StageDiffAdj - np][LoopVal]; 2241 // Use the value defined by the Phi, unless we're generating the first 2242 // epilog and the Phi refers to a Phi in a different stage.
2243 else if (VRMap[PrevStage - np].count(Def) && 2244 (!LoopDefIsPhi || PrevStage != LastStageNum)) 2245 PhiOp2 = VRMap[PrevStage - np][Def]; 2246 } 2247 2248 // Check if we can reuse an existing Phi. This occurs when a Phi 2249 // references another Phi, and the other Phi is scheduled in an 2250 // earlier stage. We can try to reuse an existing Phi up until the last 2251 // stage of the current Phi. 2252 if (LoopDefIsPhi) { 2253 if (static_cast<int>(PrologStage - np) >= StageScheduled) { 2254 int LVNumStages = Schedule.getStagesForPhi(LoopVal); 2255 int StageDiff = (StageScheduled - LoopValStage); 2256 LVNumStages -= StageDiff; 2257 // Make sure the loop value Phi has been processed already. 2258 if (LVNumStages > (int)np && VRMap[CurStageNum].count(LoopVal)) { 2259 NewReg = PhiOp2; 2260 unsigned ReuseStage = CurStageNum; 2261 if (Schedule.isLoopCarried(this, *PhiInst)) 2262 ReuseStage -= LVNumStages; 2263 // Check if the Phi to reuse has been generated yet. If not, then 2264 // there is nothing to reuse. 2265 if (VRMap[ReuseStage - np].count(LoopVal)) { 2266 NewReg = VRMap[ReuseStage - np][LoopVal]; 2267 2268 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np, 2269 &*BBI, Def, NewReg); 2270 // Update the map with the new Phi name. 2271 VRMap[CurStageNum - np][Def] = NewReg; 2272 PhiOp2 = NewReg; 2273 if (VRMap[LastStageNum - np - 1].count(LoopVal)) 2274 PhiOp2 = VRMap[LastStageNum - np - 1][LoopVal]; 2275 2276 if (IsLast && np == NumPhis - 1) 2277 replaceRegUsesAfterLoop(Def, NewReg, BB, MRI, LIS); 2278 continue; 2279 } 2280 } 2281 } 2282 if (InKernel && StageDiff > 0 && 2283 VRMap[CurStageNum - StageDiff - np].count(LoopVal)) 2284 PhiOp2 = VRMap[CurStageNum - StageDiff - np][LoopVal]; 2285 } 2286 2287 const TargetRegisterClass *RC = MRI.getRegClass(Def); 2288 NewReg = MRI.createVirtualRegister(RC); 2289 2290 MachineInstrBuilder NewPhi = 2291 BuildMI(*NewBB, NewBB->getFirstNonPHI(), DebugLoc(), 2292 TII->get(TargetOpcode::PHI), NewReg); 2293 NewPhi.addReg(PhiOp1).addMBB(BB1); 2294 NewPhi.addReg(PhiOp2).addMBB(BB2); 2295 if (np == 0) 2296 InstrMap[NewPhi] = &*BBI; 2297 2298 // We define the Phis after creating the new pipelined code, so 2299 // we need to rename the Phi values in scheduled instructions. 2300 2301 unsigned PrevReg = 0; 2302 if (InKernel && VRMap[PrevStage - np].count(LoopVal)) 2303 PrevReg = VRMap[PrevStage - np][LoopVal]; 2304 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np, &*BBI, 2305 Def, NewReg, PrevReg); 2306 // If the Phi has been scheduled, use the new name for rewriting. 2307 if (VRMap[CurStageNum - np].count(Def)) { 2308 unsigned R = VRMap[CurStageNum - np][Def]; 2309 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np, &*BBI, 2310 R, NewReg); 2311 } 2312 2313 // Check if we need to rename any uses that occurs after the loop. The 2314 // register to replace depends on whether the Phi is scheduled in the 2315 // epilog. 2316 if (IsLast && np == NumPhis - 1) 2317 replaceRegUsesAfterLoop(Def, NewReg, BB, MRI, LIS); 2318 2319 // In the kernel, a dependent Phi uses the value from this Phi. 2320 if (InKernel) 2321 PhiOp2 = NewReg; 2322 2323 // Update the map with the new Phi name. 2324 VRMap[CurStageNum - np][Def] = NewReg; 2325 } 2326 2327 while (NumPhis++ < NumStages) { 2328 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, NumPhis, 2329 &*BBI, Def, NewReg, 0); 2330 } 2331 2332 // Check if we need to rename a Phi that has been eliminated due to 2333 // scheduling. 
2334 if (NumStages == 0 && IsLast && VRMap[CurStageNum].count(LoopVal)) 2335 replaceRegUsesAfterLoop(Def, VRMap[CurStageNum][LoopVal], BB, MRI, LIS); 2336 } 2337 } 2338 2339 /// Generate Phis for the specified block in the generated pipelined code. 2340 /// These are new Phis needed because the definition is scheduled after the 2341 /// use in the pipelined sequence. 2342 void SwingSchedulerDAG::generatePhis( 2343 MachineBasicBlock *NewBB, MachineBasicBlock *BB1, MachineBasicBlock *BB2, 2344 MachineBasicBlock *KernelBB, SMSchedule &Schedule, ValueMapTy *VRMap, 2345 InstrMapTy &InstrMap, unsigned LastStageNum, unsigned CurStageNum, 2346 bool IsLast) { 2347 // Compute the stage number that contains the initial Phi value, and 2348 // the Phi from the previous stage. 2349 unsigned PrologStage = 0; 2350 unsigned PrevStage = 0; 2351 unsigned StageDiff = CurStageNum - LastStageNum; 2352 bool InKernel = (StageDiff == 0); 2353 if (InKernel) { 2354 PrologStage = LastStageNum - 1; 2355 PrevStage = CurStageNum; 2356 } else { 2357 PrologStage = LastStageNum - StageDiff; 2358 PrevStage = LastStageNum + StageDiff - 1; 2359 } 2360 2361 for (MachineBasicBlock::iterator BBI = BB->getFirstNonPHI(), 2362 BBE = BB->instr_end(); 2363 BBI != BBE; ++BBI) { 2364 for (unsigned i = 0, e = BBI->getNumOperands(); i != e; ++i) { 2365 MachineOperand &MO = BBI->getOperand(i); 2366 if (!MO.isReg() || !MO.isDef() || 2367 !TargetRegisterInfo::isVirtualRegister(MO.getReg())) 2368 continue; 2369 2370 int StageScheduled = Schedule.stageScheduled(getSUnit(&*BBI)); 2371 assert(StageScheduled != -1 && "Expecting scheduled instruction."); 2372 unsigned Def = MO.getReg(); 2373 unsigned NumPhis = Schedule.getStagesForReg(Def, CurStageNum); 2374 // An instruction scheduled in stage 0 and is used after the loop 2375 // requires a phi in the epilog for the last definition from either 2376 // the kernel or prolog. 2377 if (!InKernel && NumPhis == 0 && StageScheduled == 0 && 2378 hasUseAfterLoop(Def, BB, MRI)) 2379 NumPhis = 1; 2380 if (!InKernel && (unsigned)StageScheduled > PrologStage) 2381 continue; 2382 2383 unsigned PhiOp2 = VRMap[PrevStage][Def]; 2384 if (MachineInstr *InstOp2 = MRI.getVRegDef(PhiOp2)) 2385 if (InstOp2->isPHI() && InstOp2->getParent() == NewBB) 2386 PhiOp2 = getLoopPhiReg(*InstOp2, BB2); 2387 // The number of Phis can't exceed the number of prolog stages. The 2388 // prolog stage number is zero based. 2389 if (NumPhis > PrologStage + 1 - StageScheduled) 2390 NumPhis = PrologStage + 1 - StageScheduled; 2391 for (unsigned np = 0; np < NumPhis; ++np) { 2392 unsigned PhiOp1 = VRMap[PrologStage][Def]; 2393 if (np <= PrologStage) 2394 PhiOp1 = VRMap[PrologStage - np][Def]; 2395 if (MachineInstr *InstOp1 = MRI.getVRegDef(PhiOp1)) { 2396 if (InstOp1->isPHI() && InstOp1->getParent() == KernelBB) 2397 PhiOp1 = getInitPhiReg(*InstOp1, KernelBB); 2398 if (InstOp1->isPHI() && InstOp1->getParent() == NewBB) 2399 PhiOp1 = getInitPhiReg(*InstOp1, NewBB); 2400 } 2401 if (!InKernel) 2402 PhiOp2 = VRMap[PrevStage - np][Def]; 2403 2404 const TargetRegisterClass *RC = MRI.getRegClass(Def); 2405 unsigned NewReg = MRI.createVirtualRegister(RC); 2406 2407 MachineInstrBuilder NewPhi = 2408 BuildMI(*NewBB, NewBB->getFirstNonPHI(), DebugLoc(), 2409 TII->get(TargetOpcode::PHI), NewReg); 2410 NewPhi.addReg(PhiOp1).addMBB(BB1); 2411 NewPhi.addReg(PhiOp2).addMBB(BB2); 2412 if (np == 0) 2413 InstrMap[NewPhi] = &*BBI; 2414 2415 // Rewrite uses and update the map. 
The actions depend upon whether 2416 // we generating code for the kernel or epilog blocks. 2417 if (InKernel) { 2418 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np, 2419 &*BBI, PhiOp1, NewReg); 2420 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np, 2421 &*BBI, PhiOp2, NewReg); 2422 2423 PhiOp2 = NewReg; 2424 VRMap[PrevStage - np - 1][Def] = NewReg; 2425 } else { 2426 VRMap[CurStageNum - np][Def] = NewReg; 2427 if (np == NumPhis - 1) 2428 rewriteScheduledInstr(NewBB, Schedule, InstrMap, CurStageNum, np, 2429 &*BBI, Def, NewReg); 2430 } 2431 if (IsLast && np == NumPhis - 1) 2432 replaceRegUsesAfterLoop(Def, NewReg, BB, MRI, LIS); 2433 } 2434 } 2435 } 2436 } 2437 2438 /// Remove instructions that generate values with no uses. 2439 /// Typically, these are induction variable operations that generate values 2440 /// used in the loop itself. A dead instruction has a definition with 2441 /// no uses, or uses that occur in the original loop only. 2442 void SwingSchedulerDAG::removeDeadInstructions(MachineBasicBlock *KernelBB, 2443 MBBVectorTy &EpilogBBs) { 2444 // For each epilog block, check that the value defined by each instruction 2445 // is used. If not, delete it. 2446 for (MBBVectorTy::reverse_iterator MBB = EpilogBBs.rbegin(), 2447 MBE = EpilogBBs.rend(); 2448 MBB != MBE; ++MBB) 2449 for (MachineBasicBlock::reverse_instr_iterator MI = (*MBB)->instr_rbegin(), 2450 ME = (*MBB)->instr_rend(); 2451 MI != ME;) { 2452 // From DeadMachineInstructionElem. Don't delete inline assembly. 2453 if (MI->isInlineAsm()) { 2454 ++MI; 2455 continue; 2456 } 2457 bool SawStore = false; 2458 // Check if it's safe to remove the instruction due to side effects. 2459 // We can, and want to, remove Phis here. 2460 if (!MI->isSafeToMove(nullptr, SawStore) && !MI->isPHI()) { 2461 ++MI; 2462 continue; 2463 } 2464 bool used = true; 2465 for (MachineInstr::mop_iterator MOI = MI->operands_begin(), 2466 MOE = MI->operands_end(); 2467 MOI != MOE; ++MOI) { 2468 if (!MOI->isReg() || !MOI->isDef()) 2469 continue; 2470 unsigned reg = MOI->getReg(); 2471 // Assume physical registers are used, unless they are marked dead. 2472 if (TargetRegisterInfo::isPhysicalRegister(reg)) { 2473 used = !MOI->isDead(); 2474 if (used) 2475 break; 2476 continue; 2477 } 2478 unsigned realUses = 0; 2479 for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(reg), 2480 EI = MRI.use_end(); 2481 UI != EI; ++UI) { 2482 // Check if there are any uses that occur only in the original 2483 // loop. If so, that's not a real use. 2484 if (UI->getParent()->getParent() != BB) { 2485 realUses++; 2486 used = true; 2487 break; 2488 } 2489 } 2490 if (realUses > 0) 2491 break; 2492 used = false; 2493 } 2494 if (!used) { 2495 LIS.RemoveMachineInstrFromMaps(*MI); 2496 MI++->eraseFromParent(); 2497 continue; 2498 } 2499 ++MI; 2500 } 2501 // In the kernel block, check if we can remove a Phi that generates a value 2502 // used in an instruction removed in the epilog block. 2503 for (MachineBasicBlock::iterator BBI = KernelBB->instr_begin(), 2504 BBE = KernelBB->getFirstNonPHI(); 2505 BBI != BBE;) { 2506 MachineInstr *MI = &*BBI; 2507 ++BBI; 2508 unsigned reg = MI->getOperand(0).getReg(); 2509 if (MRI.use_begin(reg) == MRI.use_end()) { 2510 LIS.RemoveMachineInstrFromMaps(*MI); 2511 MI->eraseFromParent(); 2512 } 2513 } 2514 } 2515 2516 /// For loop carried definitions, we split the lifetime of a virtual register 2517 /// that has uses past the definition in the next iteration. 
A copy with a new 2518 /// virtual register is inserted before the definition, which helps with 2519 /// generating a better register assignment. 2520 /// 2521 /// v1 = phi(a, v2) v1 = phi(a, v2) 2522 /// v2 = phi(b, v3) v2 = phi(b, v3) 2523 /// v3 = .. v4 = copy v1 2524 /// .. = V1 v3 = .. 2525 /// .. = v4 2526 void SwingSchedulerDAG::splitLifetimes(MachineBasicBlock *KernelBB, 2527 MBBVectorTy &EpilogBBs, 2528 SMSchedule &Schedule) { 2529 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 2530 for (auto &PHI : KernelBB->phis()) { 2531 unsigned Def = PHI.getOperand(0).getReg(); 2532 // Check for any Phi definition that used as an operand of another Phi 2533 // in the same block. 2534 for (MachineRegisterInfo::use_instr_iterator I = MRI.use_instr_begin(Def), 2535 E = MRI.use_instr_end(); 2536 I != E; ++I) { 2537 if (I->isPHI() && I->getParent() == KernelBB) { 2538 // Get the loop carried definition. 2539 unsigned LCDef = getLoopPhiReg(PHI, KernelBB); 2540 if (!LCDef) 2541 continue; 2542 MachineInstr *MI = MRI.getVRegDef(LCDef); 2543 if (!MI || MI->getParent() != KernelBB || MI->isPHI()) 2544 continue; 2545 // Search through the rest of the block looking for uses of the Phi 2546 // definition. If one occurs, then split the lifetime. 2547 unsigned SplitReg = 0; 2548 for (auto &BBJ : make_range(MachineBasicBlock::instr_iterator(MI), 2549 KernelBB->instr_end())) 2550 if (BBJ.readsRegister(Def)) { 2551 // We split the lifetime when we find the first use. 2552 if (SplitReg == 0) { 2553 SplitReg = MRI.createVirtualRegister(MRI.getRegClass(Def)); 2554 BuildMI(*KernelBB, MI, MI->getDebugLoc(), 2555 TII->get(TargetOpcode::COPY), SplitReg) 2556 .addReg(Def); 2557 } 2558 BBJ.substituteRegister(Def, SplitReg, 0, *TRI); 2559 } 2560 if (!SplitReg) 2561 continue; 2562 // Search through each of the epilog blocks for any uses to be renamed. 2563 for (auto &Epilog : EpilogBBs) 2564 for (auto &I : *Epilog) 2565 if (I.readsRegister(Def)) 2566 I.substituteRegister(Def, SplitReg, 0, *TRI); 2567 break; 2568 } 2569 } 2570 } 2571 } 2572 2573 /// Remove the incoming block from the Phis in a basic block. 2574 static void removePhis(MachineBasicBlock *BB, MachineBasicBlock *Incoming) { 2575 for (MachineInstr &MI : *BB) { 2576 if (!MI.isPHI()) 2577 break; 2578 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) 2579 if (MI.getOperand(i + 1).getMBB() == Incoming) { 2580 MI.RemoveOperand(i + 1); 2581 MI.RemoveOperand(i); 2582 break; 2583 } 2584 } 2585 } 2586 2587 /// Create branches from each prolog basic block to the appropriate epilog 2588 /// block. These edges are needed if the loop ends before reaching the 2589 /// kernel. 2590 void SwingSchedulerDAG::addBranches(MBBVectorTy &PrologBBs, 2591 MachineBasicBlock *KernelBB, 2592 MBBVectorTy &EpilogBBs, 2593 SMSchedule &Schedule, ValueMapTy *VRMap) { 2594 assert(PrologBBs.size() == EpilogBBs.size() && "Prolog/Epilog mismatch"); 2595 MachineInstr *IndVar = Pass.LI.LoopInductionVar; 2596 MachineInstr *Cmp = Pass.LI.LoopCompare; 2597 MachineBasicBlock *LastPro = KernelBB; 2598 MachineBasicBlock *LastEpi = KernelBB; 2599 2600 // Start from the blocks connected to the kernel and work "out" 2601 // to the first prolog and the last epilog blocks. 
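  // Schematically, for two prolog/epilog blocks (an illustrative sketch of the
  // wiring below, not actual output):
  //   prolog 1 --loop done--> epilog 0   --otherwise--> kernel
  //   prolog 0 --loop done--> epilog 1   --otherwise--> prolog 1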
2602 SmallVector<MachineInstr *, 4> PrevInsts; 2603 unsigned MaxIter = PrologBBs.size() - 1; 2604 unsigned LC = UINT_MAX; 2605 unsigned LCMin = UINT_MAX; 2606 for (unsigned i = 0, j = MaxIter; i <= MaxIter; ++i, --j) { 2607 // Add branches to the prolog that go to the corresponding 2608 // epilog, and the fall-thru prolog/kernel block. 2609 MachineBasicBlock *Prolog = PrologBBs[j]; 2610 MachineBasicBlock *Epilog = EpilogBBs[i]; 2611 // We've executed one iteration, so decrement the loop count and check for 2612 // the loop end. 2613 SmallVector<MachineOperand, 4> Cond; 2614 // Check if the LOOP0 has already been removed. If so, then there is no need 2615 // to reduce the trip count. 2616 if (LC != 0) 2617 LC = TII->reduceLoopCount(*Prolog, IndVar, *Cmp, Cond, PrevInsts, j, 2618 MaxIter); 2619 2620 // Record the value of the first trip count, which is used to determine if 2621 // branches and blocks can be removed for constant trip counts. 2622 if (LCMin == UINT_MAX) 2623 LCMin = LC; 2624 2625 unsigned numAdded = 0; 2626 if (TargetRegisterInfo::isVirtualRegister(LC)) { 2627 Prolog->addSuccessor(Epilog); 2628 numAdded = TII->insertBranch(*Prolog, Epilog, LastPro, Cond, DebugLoc()); 2629 } else if (j >= LCMin) { 2630 Prolog->addSuccessor(Epilog); 2631 Prolog->removeSuccessor(LastPro); 2632 LastEpi->removeSuccessor(Epilog); 2633 numAdded = TII->insertBranch(*Prolog, Epilog, nullptr, Cond, DebugLoc()); 2634 removePhis(Epilog, LastEpi); 2635 // Remove the blocks that are no longer referenced. 2636 if (LastPro != LastEpi) { 2637 LastEpi->clear(); 2638 LastEpi->eraseFromParent(); 2639 } 2640 LastPro->clear(); 2641 LastPro->eraseFromParent(); 2642 } else { 2643 numAdded = TII->insertBranch(*Prolog, LastPro, nullptr, Cond, DebugLoc()); 2644 removePhis(Epilog, Prolog); 2645 } 2646 LastPro = Prolog; 2647 LastEpi = Epilog; 2648 for (MachineBasicBlock::reverse_instr_iterator I = Prolog->instr_rbegin(), 2649 E = Prolog->instr_rend(); 2650 I != E && numAdded > 0; ++I, --numAdded) 2651 updateInstruction(&*I, false, j, 0, Schedule, VRMap); 2652 } 2653 } 2654 2655 /// Return true if we can compute the amount the instruction changes 2656 /// during each iteration. Set Delta to the amount of the change. 2657 bool SwingSchedulerDAG::computeDelta(MachineInstr &MI, unsigned &Delta) { 2658 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 2659 MachineOperand *BaseOp; 2660 int64_t Offset; 2661 if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI)) 2662 return false; 2663 2664 if (!BaseOp->isReg()) 2665 return false; 2666 2667 unsigned BaseReg = BaseOp->getReg(); 2668 2669 MachineRegisterInfo &MRI = MF.getRegInfo(); 2670 // Check if there is a Phi. If so, get the definition in the loop. 2671 MachineInstr *BaseDef = MRI.getVRegDef(BaseReg); 2672 if (BaseDef && BaseDef->isPHI()) { 2673 BaseReg = getLoopPhiReg(*BaseDef, MI.getParent()); 2674 BaseDef = MRI.getVRegDef(BaseReg); 2675 } 2676 if (!BaseDef) 2677 return false; 2678 2679 int D = 0; 2680 if (!TII->getIncrementValue(*BaseDef, D) && D >= 0) 2681 return false; 2682 2683 Delta = D; 2684 return true; 2685 } 2686 2687 /// Update the memory operand with a new offset when the pipeliner 2688 /// generates a new copy of the instruction that refers to a 2689 /// different memory location. 
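/// For example (schematic): if the base address of a load advances by 8 bytes
/// per iteration, the copy emitted Num stages later has each memory operand's
/// offset adjusted by 8 * Num; when the per-iteration change cannot be
/// computed, the memory operand is widened to an unknown size instead.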
2690 void SwingSchedulerDAG::updateMemOperands(MachineInstr &NewMI, 2691 MachineInstr &OldMI, unsigned Num) { 2692 if (Num == 0) 2693 return; 2694 // If the instruction has memory operands, then adjust the offset 2695 // when the instruction appears in different stages. 2696 if (NewMI.memoperands_empty()) 2697 return; 2698 SmallVector<MachineMemOperand *, 2> NewMMOs; 2699 for (MachineMemOperand *MMO : NewMI.memoperands()) { 2700 if (MMO->isVolatile() || (MMO->isInvariant() && MMO->isDereferenceable()) || 2701 (!MMO->getValue())) { 2702 NewMMOs.push_back(MMO); 2703 continue; 2704 } 2705 unsigned Delta; 2706 if (Num != UINT_MAX && computeDelta(OldMI, Delta)) { 2707 int64_t AdjOffset = Delta * Num; 2708 NewMMOs.push_back( 2709 MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize())); 2710 } else { 2711 NewMMOs.push_back( 2712 MF.getMachineMemOperand(MMO, 0, MemoryLocation::UnknownSize)); 2713 } 2714 } 2715 NewMI.setMemRefs(MF, NewMMOs); 2716 } 2717 2718 /// Clone the instruction for the new pipelined loop and update the 2719 /// memory operands, if needed. 2720 MachineInstr *SwingSchedulerDAG::cloneInstr(MachineInstr *OldMI, 2721 unsigned CurStageNum, 2722 unsigned InstStageNum) { 2723 MachineInstr *NewMI = MF.CloneMachineInstr(OldMI); 2724 // Check for tied operands in inline asm instructions. This should be handled 2725 // elsewhere, but I'm not sure of the best solution. 2726 if (OldMI->isInlineAsm()) 2727 for (unsigned i = 0, e = OldMI->getNumOperands(); i != e; ++i) { 2728 const auto &MO = OldMI->getOperand(i); 2729 if (MO.isReg() && MO.isUse()) 2730 break; 2731 unsigned UseIdx; 2732 if (OldMI->isRegTiedToUseOperand(i, &UseIdx)) 2733 NewMI->tieOperands(i, UseIdx); 2734 } 2735 updateMemOperands(*NewMI, *OldMI, CurStageNum - InstStageNum); 2736 return NewMI; 2737 } 2738 2739 /// Clone the instruction for the new pipelined loop. If needed, this 2740 /// function updates the instruction using the values saved in the 2741 /// InstrChanges structure. 2742 MachineInstr *SwingSchedulerDAG::cloneAndChangeInstr(MachineInstr *OldMI, 2743 unsigned CurStageNum, 2744 unsigned InstStageNum, 2745 SMSchedule &Schedule) { 2746 MachineInstr *NewMI = MF.CloneMachineInstr(OldMI); 2747 DenseMap<SUnit *, std::pair<unsigned, int64_t>>::iterator It = 2748 InstrChanges.find(getSUnit(OldMI)); 2749 if (It != InstrChanges.end()) { 2750 std::pair<unsigned, int64_t> RegAndOffset = It->second; 2751 unsigned BasePos, OffsetPos; 2752 if (!TII->getBaseAndOffsetPosition(*OldMI, BasePos, OffsetPos)) 2753 return nullptr; 2754 int64_t NewOffset = OldMI->getOperand(OffsetPos).getImm(); 2755 MachineInstr *LoopDef = findDefInLoop(RegAndOffset.first); 2756 if (Schedule.stageScheduled(getSUnit(LoopDef)) > (signed)InstStageNum) 2757 NewOffset += RegAndOffset.second * (CurStageNum - InstStageNum); 2758 NewMI->getOperand(OffsetPos).setImm(NewOffset); 2759 } 2760 updateMemOperands(*NewMI, *OldMI, CurStageNum - InstStageNum); 2761 return NewMI; 2762 } 2763 2764 /// Update the machine instruction with new virtual registers. This 2765 /// function may change the defintions and/or uses. 
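/// For example (schematic): if the clone of an instruction placed in stage 2
/// uses a value defined by an instruction scheduled in stage 0, the use is
/// renamed to VRMap[CurStageNum - 2][reg], i.e. to the copy of the definition
/// generated two blocks earlier.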
2766 void SwingSchedulerDAG::updateInstruction(MachineInstr *NewMI, bool LastDef, 2767 unsigned CurStageNum, 2768 unsigned InstrStageNum, 2769 SMSchedule &Schedule, 2770 ValueMapTy *VRMap) { 2771 for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) { 2772 MachineOperand &MO = NewMI->getOperand(i); 2773 if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg())) 2774 continue; 2775 unsigned reg = MO.getReg(); 2776 if (MO.isDef()) { 2777 // Create a new virtual register for the definition. 2778 const TargetRegisterClass *RC = MRI.getRegClass(reg); 2779 unsigned NewReg = MRI.createVirtualRegister(RC); 2780 MO.setReg(NewReg); 2781 VRMap[CurStageNum][reg] = NewReg; 2782 if (LastDef) 2783 replaceRegUsesAfterLoop(reg, NewReg, BB, MRI, LIS); 2784 } else if (MO.isUse()) { 2785 MachineInstr *Def = MRI.getVRegDef(reg); 2786 // Compute the stage that contains the last definition for instruction. 2787 int DefStageNum = Schedule.stageScheduled(getSUnit(Def)); 2788 unsigned StageNum = CurStageNum; 2789 if (DefStageNum != -1 && (int)InstrStageNum > DefStageNum) { 2790 // Compute the difference in stages between the defintion and the use. 2791 unsigned StageDiff = (InstrStageNum - DefStageNum); 2792 // Make an adjustment to get the last definition. 2793 StageNum -= StageDiff; 2794 } 2795 if (VRMap[StageNum].count(reg)) 2796 MO.setReg(VRMap[StageNum][reg]); 2797 } 2798 } 2799 } 2800 2801 /// Return the instruction in the loop that defines the register. 2802 /// If the definition is a Phi, then follow the Phi operand to 2803 /// the instruction in the loop. 2804 MachineInstr *SwingSchedulerDAG::findDefInLoop(unsigned Reg) { 2805 SmallPtrSet<MachineInstr *, 8> Visited; 2806 MachineInstr *Def = MRI.getVRegDef(Reg); 2807 while (Def->isPHI()) { 2808 if (!Visited.insert(Def).second) 2809 break; 2810 for (unsigned i = 1, e = Def->getNumOperands(); i < e; i += 2) 2811 if (Def->getOperand(i + 1).getMBB() == BB) { 2812 Def = MRI.getVRegDef(Def->getOperand(i).getReg()); 2813 break; 2814 } 2815 } 2816 return Def; 2817 } 2818 2819 /// Return the new name for the value from the previous stage. 2820 unsigned SwingSchedulerDAG::getPrevMapVal(unsigned StageNum, unsigned PhiStage, 2821 unsigned LoopVal, unsigned LoopStage, 2822 ValueMapTy *VRMap, 2823 MachineBasicBlock *BB) { 2824 unsigned PrevVal = 0; 2825 if (StageNum > PhiStage) { 2826 MachineInstr *LoopInst = MRI.getVRegDef(LoopVal); 2827 if (PhiStage == LoopStage && VRMap[StageNum - 1].count(LoopVal)) 2828 // The name is defined in the previous stage. 2829 PrevVal = VRMap[StageNum - 1][LoopVal]; 2830 else if (VRMap[StageNum].count(LoopVal)) 2831 // The previous name is defined in the current stage when the instruction 2832 // order is swapped. 2833 PrevVal = VRMap[StageNum][LoopVal]; 2834 else if (!LoopInst->isPHI() || LoopInst->getParent() != BB) 2835 // The loop value hasn't yet been scheduled. 2836 PrevVal = LoopVal; 2837 else if (StageNum == PhiStage + 1) 2838 // The loop value is another phi, which has not been scheduled. 2839 PrevVal = getInitPhiReg(*LoopInst, BB); 2840 else if (StageNum > PhiStage + 1 && LoopInst->getParent() == BB) 2841 // The loop value is another phi, which has been scheduled. 2842 PrevVal = 2843 getPrevMapVal(StageNum - 1, PhiStage, getLoopPhiReg(*LoopInst, BB), 2844 LoopStage, VRMap, BB); 2845 } 2846 return PrevVal; 2847 } 2848 2849 /// Rewrite the Phi values in the specified block to use the mappings 2850 /// from the initial operand. 
Once the Phi is scheduled, we switch 2851 /// to using the loop value instead of the Phi value, so those names 2852 /// do not need to be rewritten. 2853 void SwingSchedulerDAG::rewritePhiValues(MachineBasicBlock *NewBB, 2854 unsigned StageNum, 2855 SMSchedule &Schedule, 2856 ValueMapTy *VRMap, 2857 InstrMapTy &InstrMap) { 2858 for (auto &PHI : BB->phis()) { 2859 unsigned InitVal = 0; 2860 unsigned LoopVal = 0; 2861 getPhiRegs(PHI, BB, InitVal, LoopVal); 2862 unsigned PhiDef = PHI.getOperand(0).getReg(); 2863 2864 unsigned PhiStage = 2865 (unsigned)Schedule.stageScheduled(getSUnit(MRI.getVRegDef(PhiDef))); 2866 unsigned LoopStage = 2867 (unsigned)Schedule.stageScheduled(getSUnit(MRI.getVRegDef(LoopVal))); 2868 unsigned NumPhis = Schedule.getStagesForPhi(PhiDef); 2869 if (NumPhis > StageNum) 2870 NumPhis = StageNum; 2871 for (unsigned np = 0; np <= NumPhis; ++np) { 2872 unsigned NewVal = 2873 getPrevMapVal(StageNum - np, PhiStage, LoopVal, LoopStage, VRMap, BB); 2874 if (!NewVal) 2875 NewVal = InitVal; 2876 rewriteScheduledInstr(NewBB, Schedule, InstrMap, StageNum - np, np, &PHI, 2877 PhiDef, NewVal); 2878 } 2879 } 2880 } 2881 2882 /// Rewrite a previously scheduled instruction to use the register value 2883 /// from the new instruction. Make sure the instruction occurs in the 2884 /// basic block, and we don't change the uses in the new instruction. 2885 void SwingSchedulerDAG::rewriteScheduledInstr( 2886 MachineBasicBlock *BB, SMSchedule &Schedule, InstrMapTy &InstrMap, 2887 unsigned CurStageNum, unsigned PhiNum, MachineInstr *Phi, unsigned OldReg, 2888 unsigned NewReg, unsigned PrevReg) { 2889 bool InProlog = (CurStageNum < Schedule.getMaxStageCount()); 2890 int StagePhi = Schedule.stageScheduled(getSUnit(Phi)) + PhiNum; 2891 // Rewrite uses that have been scheduled already to use the new 2892 // Phi register. 2893 for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(OldReg), 2894 EI = MRI.use_end(); 2895 UI != EI;) { 2896 MachineOperand &UseOp = *UI; 2897 MachineInstr *UseMI = UseOp.getParent(); 2898 ++UI; 2899 if (UseMI->getParent() != BB) 2900 continue; 2901 if (UseMI->isPHI()) { 2902 if (!Phi->isPHI() && UseMI->getOperand(0).getReg() == NewReg) 2903 continue; 2904 if (getLoopPhiReg(*UseMI, BB) != OldReg) 2905 continue; 2906 } 2907 InstrMapTy::iterator OrigInstr = InstrMap.find(UseMI); 2908 assert(OrigInstr != InstrMap.end() && "Instruction not scheduled."); 2909 SUnit *OrigMISU = getSUnit(OrigInstr->second); 2910 int StageSched = Schedule.stageScheduled(OrigMISU); 2911 int CycleSched = Schedule.cycleScheduled(OrigMISU); 2912 unsigned ReplaceReg = 0; 2913 // This is the stage for the scheduled instruction. 2914 if (StagePhi == StageSched && Phi->isPHI()) { 2915 int CyclePhi = Schedule.cycleScheduled(getSUnit(Phi)); 2916 if (PrevReg && InProlog) 2917 ReplaceReg = PrevReg; 2918 else if (PrevReg && !Schedule.isLoopCarried(this, *Phi) && 2919 (CyclePhi <= CycleSched || OrigMISU->getInstr()->isPHI())) 2920 ReplaceReg = PrevReg; 2921 else 2922 ReplaceReg = NewReg; 2923 } 2924 // The scheduled instruction occurs before the scheduled Phi, and the 2925 // Phi is not loop carried. 
2926 if (!InProlog && StagePhi + 1 == StageSched && 2927 !Schedule.isLoopCarried(this, *Phi)) 2928 ReplaceReg = NewReg; 2929 if (StagePhi > StageSched && Phi->isPHI()) 2930 ReplaceReg = NewReg; 2931 if (!InProlog && !Phi->isPHI() && StagePhi < StageSched) 2932 ReplaceReg = NewReg; 2933 if (ReplaceReg) { 2934 MRI.constrainRegClass(ReplaceReg, MRI.getRegClass(OldReg)); 2935 UseOp.setReg(ReplaceReg); 2936 } 2937 } 2938 } 2939 2940 /// Check if we can change the instruction to use an offset value from the 2941 /// previous iteration. If so, return true and set the base and offset values 2942 /// so that we can rewrite the load, if necessary. 2943 /// v1 = Phi(v0, v3) 2944 /// v2 = load v1, 0 2945 /// v3 = post_store v1, 4, x 2946 /// This function enables the load to be rewritten as v2 = load v3, 4. 2947 bool SwingSchedulerDAG::canUseLastOffsetValue(MachineInstr *MI, 2948 unsigned &BasePos, 2949 unsigned &OffsetPos, 2950 unsigned &NewBase, 2951 int64_t &Offset) { 2952 // Get the load instruction. 2953 if (TII->isPostIncrement(*MI)) 2954 return false; 2955 unsigned BasePosLd, OffsetPosLd; 2956 if (!TII->getBaseAndOffsetPosition(*MI, BasePosLd, OffsetPosLd)) 2957 return false; 2958 unsigned BaseReg = MI->getOperand(BasePosLd).getReg(); 2959 2960 // Look for the Phi instruction. 2961 MachineRegisterInfo &MRI = MI->getMF()->getRegInfo(); 2962 MachineInstr *Phi = MRI.getVRegDef(BaseReg); 2963 if (!Phi || !Phi->isPHI()) 2964 return false; 2965 // Get the register defined in the loop block. 2966 unsigned PrevReg = getLoopPhiReg(*Phi, MI->getParent()); 2967 if (!PrevReg) 2968 return false; 2969 2970 // Check for the post-increment load/store instruction. 2971 MachineInstr *PrevDef = MRI.getVRegDef(PrevReg); 2972 if (!PrevDef || PrevDef == MI) 2973 return false; 2974 2975 if (!TII->isPostIncrement(*PrevDef)) 2976 return false; 2977 2978 unsigned BasePos1 = 0, OffsetPos1 = 0; 2979 if (!TII->getBaseAndOffsetPosition(*PrevDef, BasePos1, OffsetPos1)) 2980 return false; 2981 2982 // Make sure that the instructions do not access the same memory location in 2983 // the next iteration. 2984 int64_t LoadOffset = MI->getOperand(OffsetPosLd).getImm(); 2985 int64_t StoreOffset = PrevDef->getOperand(OffsetPos1).getImm(); 2986 MachineInstr *NewMI = MF.CloneMachineInstr(MI); 2987 NewMI->getOperand(OffsetPosLd).setImm(LoadOffset + StoreOffset); 2988 bool Disjoint = TII->areMemAccessesTriviallyDisjoint(*NewMI, *PrevDef); 2989 MF.DeleteMachineInstr(NewMI); 2990 if (!Disjoint) 2991 return false; 2992 2993 // Set the return value once we determine that we return true. 2994 BasePos = BasePosLd; 2995 OffsetPos = OffsetPosLd; 2996 NewBase = PrevReg; 2997 Offset = StoreOffset; 2998 return true; 2999 } 3000 3001 /// Apply changes to the instruction if needed. The changes are needed 3002 /// to improve the scheduling and depend upon the final schedule.
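/// For example (schematic): if InstrChanges recorded (newBase, 8) for a load
/// whose base address is defined one or more stages after the load itself is
/// scheduled, the load is rewritten with a multiple of 8 added to its
/// immediate offset (and, depending on the relative cycles, with newBase as
/// its base register).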
3003 void SwingSchedulerDAG::applyInstrChange(MachineInstr *MI, 3004 SMSchedule &Schedule) { 3005 SUnit *SU = getSUnit(MI); 3006 DenseMap<SUnit *, std::pair<unsigned, int64_t>>::iterator It = 3007 InstrChanges.find(SU); 3008 if (It != InstrChanges.end()) { 3009 std::pair<unsigned, int64_t> RegAndOffset = It->second; 3010 unsigned BasePos, OffsetPos; 3011 if (!TII->getBaseAndOffsetPosition(*MI, BasePos, OffsetPos)) 3012 return; 3013 unsigned BaseReg = MI->getOperand(BasePos).getReg(); 3014 MachineInstr *LoopDef = findDefInLoop(BaseReg); 3015 int DefStageNum = Schedule.stageScheduled(getSUnit(LoopDef)); 3016 int DefCycleNum = Schedule.cycleScheduled(getSUnit(LoopDef)); 3017 int BaseStageNum = Schedule.stageScheduled(SU); 3018 int BaseCycleNum = Schedule.cycleScheduled(SU); 3019 if (BaseStageNum < DefStageNum) { 3020 MachineInstr *NewMI = MF.CloneMachineInstr(MI); 3021 int OffsetDiff = DefStageNum - BaseStageNum; 3022 if (DefCycleNum < BaseCycleNum) { 3023 NewMI->getOperand(BasePos).setReg(RegAndOffset.first); 3024 if (OffsetDiff > 0) 3025 --OffsetDiff; 3026 } 3027 int64_t NewOffset = 3028 MI->getOperand(OffsetPos).getImm() + RegAndOffset.second * OffsetDiff; 3029 NewMI->getOperand(OffsetPos).setImm(NewOffset); 3030 SU->setInstr(NewMI); 3031 MISUnitMap[NewMI] = SU; 3032 NewMIs.insert(NewMI); 3033 } 3034 } 3035 } 3036 3037 /// Return true for an order or output dependence that is potentially loop 3038 /// carried. A dependence is loop carried if the destination defines a value 3039 /// that may be used or defined by the source in a subsequent iteration. 3040 bool SwingSchedulerDAG::isLoopCarriedDep(SUnit *Source, const SDep &Dep, 3041 bool isSucc) { 3042 if ((Dep.getKind() != SDep::Order && Dep.getKind() != SDep::Output) || 3043 Dep.isArtificial()) 3044 return false; 3045 3046 if (!SwpPruneLoopCarried) 3047 return true; 3048 3049 if (Dep.getKind() == SDep::Output) 3050 return true; 3051 3052 MachineInstr *SI = Source->getInstr(); 3053 MachineInstr *DI = Dep.getSUnit()->getInstr(); 3054 if (!isSucc) 3055 std::swap(SI, DI); 3056 assert(SI != nullptr && DI != nullptr && "Expecting SUnit with an MI."); 3057 3058 // Assume ordered loads and stores may have a loop carried dependence. 3059 if (SI->hasUnmodeledSideEffects() || DI->hasUnmodeledSideEffects() || 3060 SI->hasOrderedMemoryRef() || DI->hasOrderedMemoryRef()) 3061 return true; 3062 3063 // Only chain dependences between a load and store can be loop carried. 3064 if (!DI->mayStore() || !SI->mayLoad()) 3065 return false; 3066 3067 unsigned DeltaS, DeltaD; 3068 if (!computeDelta(*SI, DeltaS) || !computeDelta(*DI, DeltaD)) 3069 return true; 3070 3071 MachineOperand *BaseOpS, *BaseOpD; 3072 int64_t OffsetS, OffsetD; 3073 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 3074 if (!TII->getMemOperandWithOffset(*SI, BaseOpS, OffsetS, TRI) || 3075 !TII->getMemOperandWithOffset(*DI, BaseOpD, OffsetD, TRI)) 3076 return true; 3077 3078 if (!BaseOpS->isIdenticalTo(*BaseOpD)) 3079 return true; 3080 3081 // Check that the base register is incremented by a constant value for each 3082 // iteration.
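  // If it is, the offset test at the end of this function decides whether the
  // two accesses can overlap across iterations. For example (illustrative):
  // with OffsetS == OffsetD == 0, AccessSizeS == 4 and DeltaS == 4, the check
  // 0 + 4 > 4 fails, so the chain edge is not treated as loop carried.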
3083 MachineInstr *Def = MRI.getVRegDef(BaseOpS->getReg()); 3084 if (!Def || !Def->isPHI()) 3085 return true; 3086 unsigned InitVal = 0; 3087 unsigned LoopVal = 0; 3088 getPhiRegs(*Def, BB, InitVal, LoopVal); 3089 MachineInstr *LoopDef = MRI.getVRegDef(LoopVal); 3090 int D = 0; 3091 if (!LoopDef || !TII->getIncrementValue(*LoopDef, D)) 3092 return true; 3093 3094 uint64_t AccessSizeS = (*SI->memoperands_begin())->getSize(); 3095 uint64_t AccessSizeD = (*DI->memoperands_begin())->getSize(); 3096 3097 // This is the main test, which checks the offset values and the loop 3098 // increment value to determine if the accesses may be loop carried. 3099 if (OffsetS >= OffsetD) 3100 return OffsetS + AccessSizeS > DeltaS; 3101 else 3102 return OffsetD + AccessSizeD > DeltaD; 3103 3104 return true; 3105 } 3106 3107 void SwingSchedulerDAG::postprocessDAG() { 3108 for (auto &M : Mutations) 3109 M->apply(this); 3110 } 3111 3112 /// Try to schedule the node at the specified StartCycle and continue 3113 /// until the node is schedule or the EndCycle is reached. This function 3114 /// returns true if the node is scheduled. This routine may search either 3115 /// forward or backward for a place to insert the instruction based upon 3116 /// the relative values of StartCycle and EndCycle. 3117 bool SMSchedule::insert(SUnit *SU, int StartCycle, int EndCycle, int II) { 3118 bool forward = true; 3119 if (StartCycle > EndCycle) 3120 forward = false; 3121 3122 // The terminating condition depends on the direction. 3123 int termCycle = forward ? EndCycle + 1 : EndCycle - 1; 3124 for (int curCycle = StartCycle; curCycle != termCycle; 3125 forward ? ++curCycle : --curCycle) { 3126 3127 // Add the already scheduled instructions at the specified cycle to the DFA. 3128 Resources->clearResources(); 3129 for (int checkCycle = FirstCycle + ((curCycle - FirstCycle) % II); 3130 checkCycle <= LastCycle; checkCycle += II) { 3131 std::deque<SUnit *> &cycleInstrs = ScheduledInstrs[checkCycle]; 3132 3133 for (std::deque<SUnit *>::iterator I = cycleInstrs.begin(), 3134 E = cycleInstrs.end(); 3135 I != E; ++I) { 3136 if (ST.getInstrInfo()->isZeroCost((*I)->getInstr()->getOpcode())) 3137 continue; 3138 assert(Resources->canReserveResources(*(*I)->getInstr()) && 3139 "These instructions have already been scheduled."); 3140 Resources->reserveResources(*(*I)->getInstr()); 3141 } 3142 } 3143 if (ST.getInstrInfo()->isZeroCost(SU->getInstr()->getOpcode()) || 3144 Resources->canReserveResources(*SU->getInstr())) { 3145 LLVM_DEBUG({ 3146 dbgs() << "\tinsert at cycle " << curCycle << " "; 3147 SU->getInstr()->dump(); 3148 }); 3149 3150 ScheduledInstrs[curCycle].push_back(SU); 3151 InstrToCycle.insert(std::make_pair(SU, curCycle)); 3152 if (curCycle > LastCycle) 3153 LastCycle = curCycle; 3154 if (curCycle < FirstCycle) 3155 FirstCycle = curCycle; 3156 return true; 3157 } 3158 LLVM_DEBUG({ 3159 dbgs() << "\tfailed to insert at cycle " << curCycle << " "; 3160 SU->getInstr()->dump(); 3161 }); 3162 } 3163 return false; 3164 } 3165 3166 // Return the cycle of the earliest scheduled instruction in the chain. 
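// For example (illustrative): if the order/output chain reached from the given
// dependence contains instructions scheduled at cycles 7, 5 and 2, the
// function returns 2.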
int SMSchedule::earliestCycleInChain(const SDep &Dep) {
  SmallPtrSet<SUnit *, 8> Visited;
  SmallVector<SDep, 8> Worklist;
  Worklist.push_back(Dep);
  int EarlyCycle = INT_MAX;
  while (!Worklist.empty()) {
    const SDep &Cur = Worklist.pop_back_val();
    SUnit *PrevSU = Cur.getSUnit();
    if (Visited.count(PrevSU))
      continue;
    std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(PrevSU);
    if (it == InstrToCycle.end())
      continue;
    EarlyCycle = std::min(EarlyCycle, it->second);
    for (const auto &PI : PrevSU->Preds)
      if (PI.getKind() == SDep::Order || Dep.getKind() == SDep::Output)
        Worklist.push_back(PI);
    Visited.insert(PrevSU);
  }
  return EarlyCycle;
}

// Return the cycle of the latest scheduled instruction in the chain.
int SMSchedule::latestCycleInChain(const SDep &Dep) {
  SmallPtrSet<SUnit *, 8> Visited;
  SmallVector<SDep, 8> Worklist;
  Worklist.push_back(Dep);
  int LateCycle = INT_MIN;
  while (!Worklist.empty()) {
    const SDep &Cur = Worklist.pop_back_val();
    SUnit *SuccSU = Cur.getSUnit();
    if (Visited.count(SuccSU))
      continue;
    std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SuccSU);
    if (it == InstrToCycle.end())
      continue;
    LateCycle = std::max(LateCycle, it->second);
    for (const auto &SI : SuccSU->Succs)
      if (SI.getKind() == SDep::Order || Dep.getKind() == SDep::Output)
        Worklist.push_back(SI);
    Visited.insert(SuccSU);
  }
  return LateCycle;
}

/// If an instruction has a use that spans multiple iterations, then
/// return the Phi it is connected to; otherwise return nullptr. These
/// instructions are characterized by having a back-edge to a Phi, which
/// contains a reference to another Phi.
static SUnit *multipleIterations(SUnit *SU, SwingSchedulerDAG *DAG) {
  for (auto &P : SU->Preds)
    if (DAG->isBackedge(SU, P) && P.getSUnit()->getInstr()->isPHI())
      for (auto &S : P.getSUnit()->Succs)
        if (S.getKind() == SDep::Data && S.getSUnit()->getInstr()->isPHI())
          return P.getSUnit();
  return nullptr;
}

/// Compute the scheduling start slot for the instruction. The start slot
/// depends on any predecessor or successor nodes scheduled already.
void SMSchedule::computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
                              int *MinEnd, int *MaxStart, int II,
                              SwingSchedulerDAG *DAG) {
  // Iterate over each instruction that has been scheduled already. The start
  // slot computation depends on whether the previously scheduled instruction
  // is a predecessor or successor of the specified instruction.
  for (int cycle = getFirstCycle(); cycle <= LastCycle; ++cycle) {

    // Iterate over each instruction in the current cycle.
    for (SUnit *I : getInstructions(cycle)) {
      // Because we're processing a DAG for the dependences, we recognize
      // the back-edge in recurrences by anti dependences.
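      // For a predecessor already placed at 'cycle', the earliest start is
      // EarlyStart = cycle + latency - distance * II. For example, with the
      // predecessor at cycle 5, latency 2, dependence distance 1, and II = 4,
      // this node cannot start before cycle 5 + 2 - 4 = 3.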
      for (unsigned i = 0, e = (unsigned)SU->Preds.size(); i != e; ++i) {
        const SDep &Dep = SU->Preds[i];
        if (Dep.getSUnit() == I) {
          if (!DAG->isBackedge(SU, Dep)) {
            int EarlyStart = cycle + Dep.getLatency() -
                             DAG->getDistance(Dep.getSUnit(), SU, Dep) * II;
            *MaxEarlyStart = std::max(*MaxEarlyStart, EarlyStart);
            if (DAG->isLoopCarriedDep(SU, Dep, false)) {
              int End = earliestCycleInChain(Dep) + (II - 1);
              *MinEnd = std::min(*MinEnd, End);
            }
          } else {
            int LateStart = cycle - Dep.getLatency() +
                            DAG->getDistance(SU, Dep.getSUnit(), Dep) * II;
            *MinLateStart = std::min(*MinLateStart, LateStart);
          }
        }
        // For an instruction that requires multiple iterations, make sure
        // that the dependent instruction is not scheduled past the
        // definition.
        SUnit *BE = multipleIterations(I, DAG);
        if (BE && Dep.getSUnit() == BE && !SU->getInstr()->isPHI() &&
            !SU->isPred(I))
          *MinLateStart = std::min(*MinLateStart, cycle);
      }
      for (unsigned i = 0, e = (unsigned)SU->Succs.size(); i != e; ++i) {
        if (SU->Succs[i].getSUnit() == I) {
          const SDep &Dep = SU->Succs[i];
          if (!DAG->isBackedge(SU, Dep)) {
            int LateStart = cycle - Dep.getLatency() +
                            DAG->getDistance(SU, Dep.getSUnit(), Dep) * II;
            *MinLateStart = std::min(*MinLateStart, LateStart);
            if (DAG->isLoopCarriedDep(SU, Dep)) {
              int Start = latestCycleInChain(Dep) + 1 - II;
              *MaxStart = std::max(*MaxStart, Start);
            }
          } else {
            int EarlyStart = cycle + Dep.getLatency() -
                             DAG->getDistance(Dep.getSUnit(), SU, Dep) * II;
            *MaxEarlyStart = std::max(*MaxEarlyStart, EarlyStart);
          }
        }
      }
    }
  }
}

/// Order the instructions within a cycle so that the definitions occur
/// before the uses. The instruction is added to the front of the list when
/// it must precede an instruction already in the list; otherwise, it is
/// added to the end.
void SMSchedule::orderDependence(SwingSchedulerDAG *SSD, SUnit *SU,
                                 std::deque<SUnit *> &Insts) {
  MachineInstr *MI = SU->getInstr();
  bool OrderBeforeUse = false;
  bool OrderAfterDef = false;
  bool OrderBeforeDef = false;
  unsigned MoveDef = 0;
  unsigned MoveUse = 0;
  int StageInst1 = stageScheduled(SU);

  unsigned Pos = 0;
  for (std::deque<SUnit *>::iterator I = Insts.begin(), E = Insts.end(); I != E;
       ++I, ++Pos) {
    for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
        continue;

      unsigned Reg = MO.getReg();
      unsigned BasePos, OffsetPos;
      if (ST.getInstrInfo()->getBaseAndOffsetPosition(*MI, BasePos, OffsetPos))
        if (MI->getOperand(BasePos).getReg() == Reg)
          if (unsigned NewReg = SSD->getInstrBaseReg(SU))
            Reg = NewReg;
      bool Reads, Writes;
      std::tie(Reads, Writes) =
          (*I)->getInstr()->readsWritesVirtualRegister(Reg);
      if (MO.isDef() && Reads && stageScheduled(*I) <= StageInst1) {
        OrderBeforeUse = true;
        if (MoveUse == 0)
          MoveUse = Pos;
      } else if (MO.isDef() && Reads && stageScheduled(*I) > StageInst1) {
        // Add the instruction after the scheduled instruction.
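        // The instruction already in the list reads this def but belongs to
        // a later stage, so it consumes the value from a previous iteration;
        // record its position so the new definition is placed after it.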
        OrderAfterDef = true;
        MoveDef = Pos;
      } else if (MO.isUse() && Writes && stageScheduled(*I) == StageInst1) {
        if (cycleScheduled(*I) == cycleScheduled(SU) && !(*I)->isSucc(SU)) {
          OrderBeforeUse = true;
          if (MoveUse == 0)
            MoveUse = Pos;
        } else {
          OrderAfterDef = true;
          MoveDef = Pos;
        }
      } else if (MO.isUse() && Writes && stageScheduled(*I) > StageInst1) {
        OrderBeforeUse = true;
        if (MoveUse == 0)
          MoveUse = Pos;
        if (MoveUse != 0) {
          OrderAfterDef = true;
          MoveDef = Pos - 1;
        }
      } else if (MO.isUse() && Writes && stageScheduled(*I) < StageInst1) {
        // Add the instruction before the scheduled instruction.
        OrderBeforeUse = true;
        if (MoveUse == 0)
          MoveUse = Pos;
      } else if (MO.isUse() && stageScheduled(*I) == StageInst1 &&
                 isLoopCarriedDefOfUse(SSD, (*I)->getInstr(), MO)) {
        if (MoveUse == 0) {
          OrderBeforeDef = true;
          MoveUse = Pos;
        }
      }
    }
    // Check for order dependences between instructions. Make sure the source
    // is ordered before the destination.
    for (auto &S : SU->Succs) {
      if (S.getSUnit() != *I)
        continue;
      if (S.getKind() == SDep::Order && stageScheduled(*I) == StageInst1) {
        OrderBeforeUse = true;
        if (Pos < MoveUse)
          MoveUse = Pos;
      }
    }
    for (auto &P : SU->Preds) {
      if (P.getSUnit() != *I)
        continue;
      if (P.getKind() == SDep::Order && stageScheduled(*I) == StageInst1) {
        OrderAfterDef = true;
        MoveDef = Pos;
      }
    }
  }

  // A circular dependence.
  if (OrderAfterDef && OrderBeforeUse && MoveUse == MoveDef)
    OrderBeforeUse = false;

  // OrderAfterDef takes precedence over OrderBeforeDef. The latter is due
  // to a loop-carried dependence.
  if (OrderBeforeDef)
    OrderBeforeUse = !OrderAfterDef || (MoveUse > MoveDef);

  // The uncommon case when the instruction order needs to be updated because
  // there is both a use and a def.
  if (OrderBeforeUse && OrderAfterDef) {
    SUnit *UseSU = Insts.at(MoveUse);
    SUnit *DefSU = Insts.at(MoveDef);
    if (MoveUse > MoveDef) {
      Insts.erase(Insts.begin() + MoveUse);
      Insts.erase(Insts.begin() + MoveDef);
    } else {
      Insts.erase(Insts.begin() + MoveDef);
      Insts.erase(Insts.begin() + MoveUse);
    }
    orderDependence(SSD, UseSU, Insts);
    orderDependence(SSD, SU, Insts);
    orderDependence(SSD, DefSU, Insts);
    return;
  }
  // Put the new instruction first if there is a use in the list. Otherwise,
  // put it at the end of the list.
  if (OrderBeforeUse)
    Insts.push_front(SU);
  else
    Insts.push_back(SU);
}

/// Return true if the scheduled Phi has a loop carried operand.
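/// The operand in question is the value that reaches the Phi along the loop
/// back-edge. It is treated as loop carried when its defining instruction
/// cannot be found in the DAG, is another Phi, or is scheduled at a later
/// cycle than the Phi or in a stage that is not later than the Phi's stage.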
bool SMSchedule::isLoopCarried(SwingSchedulerDAG *SSD, MachineInstr &Phi) {
  if (!Phi.isPHI())
    return false;
  SUnit *DefSU = SSD->getSUnit(&Phi);
  unsigned DefCycle = cycleScheduled(DefSU);
  int DefStage = stageScheduled(DefSU);

  unsigned InitVal = 0;
  unsigned LoopVal = 0;
  getPhiRegs(Phi, Phi.getParent(), InitVal, LoopVal);
  SUnit *UseSU = SSD->getSUnit(MRI.getVRegDef(LoopVal));
  if (!UseSU)
    return true;
  if (UseSU->getInstr()->isPHI())
    return true;
  unsigned LoopCycle = cycleScheduled(UseSU);
  int LoopStage = stageScheduled(UseSU);
  return (LoopCycle > DefCycle) || (LoopStage <= DefStage);
}

/// Return true if the instruction is a definition that is loop carried
/// and defines the use on the next iteration.
///        v1 = phi(v2, v3)
///  (Def) v3 = op v1
///  (MO)     = v1
/// If MO appears before Def, then v1 and v3 may get assigned to the same
/// register.
bool SMSchedule::isLoopCarriedDefOfUse(SwingSchedulerDAG *SSD,
                                       MachineInstr *Def, MachineOperand &MO) {
  if (!MO.isReg())
    return false;
  if (Def->isPHI())
    return false;
  MachineInstr *Phi = MRI.getVRegDef(MO.getReg());
  if (!Phi || !Phi->isPHI() || Phi->getParent() != Def->getParent())
    return false;
  if (!isLoopCarried(SSD, *Phi))
    return false;
  unsigned LoopReg = getLoopPhiReg(*Phi, Phi->getParent());
  for (unsigned i = 0, e = Def->getNumOperands(); i != e; ++i) {
    MachineOperand &DMO = Def->getOperand(i);
    if (!DMO.isReg() || !DMO.isDef())
      continue;
    if (DMO.getReg() == LoopReg)
      return true;
  }
  return false;
}

// Check if the generated schedule is valid. This function checks if
// an instruction that uses a physical register is scheduled in a
// different stage than the definition. The pipeliner does not handle
// physical register values that may cross a basic block boundary.
bool SMSchedule::isValidSchedule(SwingSchedulerDAG *SSD) {
  for (int i = 0, e = SSD->SUnits.size(); i < e; ++i) {
    SUnit &SU = SSD->SUnits[i];
    if (!SU.hasPhysRegDefs)
      continue;
    int StageDef = stageScheduled(&SU);
    assert(StageDef != -1 && "Instruction should have been scheduled.");
    for (auto &SI : SU.Succs)
      if (SI.isAssignedRegDep())
        if (ST.getRegisterInfo()->isPhysicalRegister(SI.getReg()))
          if (stageScheduled(SI.getSUnit()) != StageDef)
            return false;
  }
  return true;
}

/// A property of the node order in swing-modulo-scheduling is
/// that for nodes outside circuits the following holds:
/// none of them is scheduled after both a successor and a
/// predecessor.
/// The method below checks whether the property is met.
/// If not, debug information is printed and the statistics are updated.
/// Note that we do not use an assert statement.
/// The reason is that although an invalid node order may prevent
/// the pipeliner from finding a pipelined schedule for arbitrary II,
/// it does not lead to the generation of incorrect code.
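/// For example, given nodes A, B, and C with edges A->C and C->B, the node
/// order {A, B, C} places C after both its predecessor A and its successor
/// B; if C is not part of a circuit, that order violates the property.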
void SwingSchedulerDAG::checkValidNodeOrder(const NodeSetType &Circuits) const {

  // a sorted vector that maps each SUnit to its index in the NodeOrder
  typedef std::pair<SUnit *, unsigned> UnitIndex;
  std::vector<UnitIndex> Indices;
  Indices.reserve(NodeOrder.size());

  for (unsigned i = 0, s = NodeOrder.size(); i < s; ++i)
    Indices.push_back(std::make_pair(NodeOrder[i], i));

  auto CompareKey = [](UnitIndex i1, UnitIndex i2) {
    return std::get<0>(i1) < std::get<0>(i2);
  };

  // sort, so that we can perform a binary search
  llvm::sort(Indices, CompareKey);

  bool Valid = true;
  (void)Valid;
  // for each SUnit in the NodeOrder, check whether
  // it appears after both a successor and a predecessor
  // of the SUnit. If this is the case, and the SUnit
  // is not part of a circuit, then the NodeOrder is not
  // valid.
  for (unsigned i = 0, s = NodeOrder.size(); i < s; ++i) {
    SUnit *SU = NodeOrder[i];
    unsigned Index = i;

    bool PredBefore = false;
    bool SuccBefore = false;

    SUnit *Succ;
    SUnit *Pred;
    (void)Succ;
    (void)Pred;

    for (SDep &PredEdge : SU->Preds) {
      SUnit *PredSU = PredEdge.getSUnit();
      unsigned PredIndex =
          std::get<1>(*std::lower_bound(Indices.begin(), Indices.end(),
                                        std::make_pair(PredSU, 0), CompareKey));
      if (!PredSU->getInstr()->isPHI() && PredIndex < Index) {
        PredBefore = true;
        Pred = PredSU;
        break;
      }
    }

    for (SDep &SuccEdge : SU->Succs) {
      SUnit *SuccSU = SuccEdge.getSUnit();
      unsigned SuccIndex =
          std::get<1>(*std::lower_bound(Indices.begin(), Indices.end(),
                                        std::make_pair(SuccSU, 0), CompareKey));
      if (!SuccSU->getInstr()->isPHI() && SuccIndex < Index) {
        SuccBefore = true;
        Succ = SuccSU;
        break;
      }
    }

    if (PredBefore && SuccBefore && !SU->getInstr()->isPHI()) {
      // instructions in circuits are allowed to be scheduled
      // after both a successor and a predecessor.
      bool InCircuit = std::any_of(
          Circuits.begin(), Circuits.end(),
          [SU](const NodeSet &Circuit) { return Circuit.count(SU); });
      if (InCircuit)
        LLVM_DEBUG(dbgs() << "In a circuit, predecessor ");
      else {
        Valid = false;
        NumNodeOrderIssues++;
        LLVM_DEBUG(dbgs() << "Predecessor ");
      }
      LLVM_DEBUG(dbgs() << Pred->NodeNum << " and successor " << Succ->NodeNum
                        << " are scheduled before node " << SU->NodeNum
                        << "\n");
    }
  }

  LLVM_DEBUG({
    if (!Valid)
      dbgs() << "Invalid node order found!\n";
  });
}

/// Attempt to fix the degenerate cases when the instruction serialization
/// causes the register lifetimes to overlap. For example,
///   p' = store_pi(p, b)
///      = load p, offset
/// In this case p and p' overlap, which means that two registers are needed.
/// Instead, this function changes the load to use p' and updates the offset.
void SwingSchedulerDAG::fixupRegisterOverlaps(std::deque<SUnit *> &Instrs) {
  unsigned OverlapReg = 0;
  unsigned NewBaseReg = 0;
  for (SUnit *SU : Instrs) {
    MachineInstr *MI = SU->getInstr();
    for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      // Look for an instruction that uses p. The instruction occurs in the
      // same cycle but occurs later in the serialized order.
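      // OverlapReg (p in the example above) and NewBaseReg (p') are recorded
      // when an earlier instruction in this cycle with tied use/def operands
      // is visited by the check further down in this loop.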
      if (MO.isReg() && MO.isUse() && MO.getReg() == OverlapReg) {
        // Check that the instruction appears in the InstrChanges structure,
        // which contains instructions that can have the offset updated.
        DenseMap<SUnit *, std::pair<unsigned, int64_t>>::iterator It =
            InstrChanges.find(SU);
        if (It != InstrChanges.end()) {
          unsigned BasePos, OffsetPos;
          // Update the base register and adjust the offset.
          if (TII->getBaseAndOffsetPosition(*MI, BasePos, OffsetPos)) {
            MachineInstr *NewMI = MF.CloneMachineInstr(MI);
            NewMI->getOperand(BasePos).setReg(NewBaseReg);
            int64_t NewOffset =
                MI->getOperand(OffsetPos).getImm() - It->second.second;
            NewMI->getOperand(OffsetPos).setImm(NewOffset);
            SU->setInstr(NewMI);
            MISUnitMap[NewMI] = SU;
            NewMIs.insert(NewMI);
          }
        }
        OverlapReg = 0;
        NewBaseReg = 0;
        break;
      }
      // Look for an instruction of the form p' = op(p), which uses and defines
      // two virtual registers that get allocated to the same physical register.
      unsigned TiedUseIdx = 0;
      if (MI->isRegTiedToUseOperand(i, &TiedUseIdx)) {
        // OverlapReg is p in the example above.
        OverlapReg = MI->getOperand(TiedUseIdx).getReg();
        // NewBaseReg is p' in the example above.
        NewBaseReg = MI->getOperand(i).getReg();
        break;
      }
    }
  }
}

/// After the schedule has been formed, call this function to combine
/// the instructions from the different stages/cycles. That is, this
/// function creates a schedule that represents a single iteration.
void SMSchedule::finalizeSchedule(SwingSchedulerDAG *SSD) {
  // Move all instructions to the first stage from later stages.
  for (int cycle = getFirstCycle(); cycle <= getFinalCycle(); ++cycle) {
    for (int stage = 1, lastStage = getMaxStageCount(); stage <= lastStage;
         ++stage) {
      std::deque<SUnit *> &cycleInstrs =
          ScheduledInstrs[cycle + (stage * InitiationInterval)];
      for (std::deque<SUnit *>::reverse_iterator I = cycleInstrs.rbegin(),
                                                 E = cycleInstrs.rend();
           I != E; ++I)
        ScheduledInstrs[cycle].push_front(*I);
    }
  }
  // Iterate over the definitions in each instruction, and compute the
  // stage difference for each use. Keep the maximum value.
  for (auto &I : InstrToCycle) {
    int DefStage = stageScheduled(I.first);
    MachineInstr *MI = I.first->getInstr();
    for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
      MachineOperand &Op = MI->getOperand(i);
      if (!Op.isReg() || !Op.isDef())
        continue;

      unsigned Reg = Op.getReg();
      unsigned MaxDiff = 0;
      bool PhiIsSwapped = false;
      for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(Reg),
                                             EI = MRI.use_end();
           UI != EI; ++UI) {
        MachineOperand &UseOp = *UI;
        MachineInstr *UseMI = UseOp.getParent();
        SUnit *SUnitUse = SSD->getSUnit(UseMI);
        int UseStage = stageScheduled(SUnitUse);
        unsigned Diff = 0;
        if (UseStage != -1 && UseStage >= DefStage)
          Diff = UseStage - DefStage;
        if (MI->isPHI()) {
          if (isLoopCarried(SSD, *MI))
            ++Diff;
          else
            PhiIsSwapped = true;
        }
        MaxDiff = std::max(Diff, MaxDiff);
      }
      RegToStageDiff[Reg] = std::make_pair(MaxDiff, PhiIsSwapped);
    }
  }

  // Erase all the elements in the later stages. Only one iteration should
  // remain in the scheduled list, and it contains all the instructions.
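  // For example, with (say) II = 2 and three stages, the kernel cycles up to
  // getFinalCycle() now hold every instruction of the single iteration; the
  // entries still left in later cycles are the duplicates that were copied
  // down above and can be dropped.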
  for (int cycle = getFinalCycle() + 1; cycle <= LastCycle; ++cycle)
    ScheduledInstrs.erase(cycle);

  // Change the registers in each instruction as specified in the InstrChanges
  // map. We need to use the new registers to create the correct order.
  for (int i = 0, e = SSD->SUnits.size(); i != e; ++i) {
    SUnit *SU = &SSD->SUnits[i];
    SSD->applyInstrChange(SU->getInstr(), *this);
  }

  // Reorder the instructions in each cycle to fix and improve the
  // generated code.
  for (int Cycle = getFirstCycle(), E = getFinalCycle(); Cycle <= E; ++Cycle) {
    std::deque<SUnit *> &cycleInstrs = ScheduledInstrs[Cycle];
    std::deque<SUnit *> newOrderPhi;
    for (unsigned i = 0, e = cycleInstrs.size(); i < e; ++i) {
      SUnit *SU = cycleInstrs[i];
      if (SU->getInstr()->isPHI())
        newOrderPhi.push_back(SU);
    }
    std::deque<SUnit *> newOrderI;
    for (unsigned i = 0, e = cycleInstrs.size(); i < e; ++i) {
      SUnit *SU = cycleInstrs[i];
      if (!SU->getInstr()->isPHI())
        orderDependence(SSD, SU, newOrderI);
    }
    // Replace the old order with the new order.
    cycleInstrs.swap(newOrderPhi);
    cycleInstrs.insert(cycleInstrs.end(), newOrderI.begin(), newOrderI.end());
    SSD->fixupRegisterOverlaps(cycleInstrs);
  }

  LLVM_DEBUG(dump());
}

void NodeSet::print(raw_ostream &os) const {
  os << "Num nodes " << size() << " rec " << RecMII << " mov " << MaxMOV
     << " depth " << MaxDepth << " col " << Colocate << "\n";
  for (const auto &I : Nodes)
    os << " SU(" << I->NodeNum << ") " << *(I->getInstr());
  os << "\n";
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the schedule information to the given output.
void SMSchedule::print(raw_ostream &os) const {
  // Iterate over each cycle.
  for (int cycle = getFirstCycle(); cycle <= getFinalCycle(); ++cycle) {
    // Iterate over each instruction in the cycle.
    const_sched_iterator cycleInstrs = ScheduledInstrs.find(cycle);
    for (SUnit *CI : cycleInstrs->second) {
      os << "cycle " << cycle << " (" << stageScheduled(CI) << ") ";
      os << "(" << CI->NodeNum << ") ";
      CI->getInstr()->print(os);
      os << "\n";
    }
  }
}

/// Utility function used for debugging to print the schedule.
LLVM_DUMP_METHOD void SMSchedule::dump() const { print(dbgs()); }
LLVM_DUMP_METHOD void NodeSet::dump() const { print(dbgs()); }

#endif