//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This implements the ScheduleDAGInstrs class, which implements
/// re-scheduling of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/ADT/IntEqClasses.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
    cl::ZeroOrMore, cl::init(false),
    cl::desc("Enable use of AA during MI DAG construction"));

static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden,
    cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"));

// Note: the two options below might be used in tuning compile time vs
// output quality. Setting HugeRegion so large that it will never be
// reached means best-effort, but may be slow.

// When Stores and Loads maps (or NonAliasStores and NonAliasLoads)
// together hold this many SUs, a reduction of maps will be done.
static cl::opt<unsigned> HugeRegion("dag-maps-huge-region", cl::Hidden,
    cl::init(1000), cl::desc("The limit to use while constructing the DAG "
                             "prior to scheduling, at which point a trade-off "
                             "is made to avoid excessive compile time."));

static cl::opt<unsigned> ReductionSize(
    "dag-maps-reduction-size", cl::Hidden,
    cl::desc("A huge scheduling region will have maps reduced by this many "
             "nodes at a time. Defaults to HugeRegion / 2."));

static unsigned getReductionSize() {
  // Always reduce a huge region with half of the elements, except
  // when user sets this number explicitly.
  if (ReductionSize.getNumOccurrences() == 0)
    return HugeRegion / 2;
  return ReductionSize;
}

static void dumpSUList(ScheduleDAGInstrs::SUList &L) {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << "{ ";
  for (const SUnit *su : L) {
    dbgs() << "SU(" << su->NodeNum << ")";
    if (su != L.back())
      dbgs() << ", ";
  }
  dbgs() << "}\n";
#endif
}

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo *mli,
                                     bool RemoveKillFlags)
    : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()),
      RemoveKillFlags(RemoveKillFlags),
      UnknownValue(UndefValue::get(
          Type::getVoidTy(mf.getFunction().getContext()))),
      Topo(SUnits, &ExitSU) {
  DbgValues.clear();

  const TargetSubtargetInfo &ST = mf.getSubtarget();
  SchedModel.init(&ST);
}

/// If this machine instr has memory reference information and it can be
/// tracked to a normal reference to a known object, return the Value
/// for that object. This function returns false if the memory location is
/// unknown or may alias anything.
static bool getUnderlyingObjectsForInstr(const MachineInstr *MI,
                                         const MachineFrameInfo &MFI,
                                         UnderlyingObjectsVector &Objects,
                                         const DataLayout &DL) {
  auto allMMOsOkay = [&]() {
    for (const MachineMemOperand *MMO : MI->memoperands()) {
      // TODO: Figure out whether isAtomic is really necessary (see D57601).
      if (MMO->isVolatile() || MMO->isAtomic())
        return false;

      if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
        // Functions that contain tail calls don't have unique
        // PseudoSourceValue objects. Two PseudoSourceValues might refer to
        // the same or overlapping locations. The client code calling this
        // function assumes this is not the case. So return a conservative
        // answer of no known object.
        if (MFI.hasTailCall())
          return false;

        // For now, ignore PseudoSourceValues which may alias LLVM IR values
        // because the code that uses this function has no way to cope with
        // such aliases.
        if (PSV->isAliased(&MFI))
          return false;

        bool MayAlias = PSV->mayAlias(&MFI);
        Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias));
      } else if (const Value *V = MMO->getValue()) {
        SmallVector<Value *, 4> Objs;
        if (!getUnderlyingObjectsForCodeGen(V, Objs, DL))
          return false;

        for (Value *V : Objs) {
          assert(isIdentifiedObject(V));
          Objects.push_back(UnderlyingObjectsVector::value_type(V, true));
        }
      } else
        return false;
    }
    return true;
  };

  if (!allMMOsOkay()) {
    Objects.clear();
    return false;
  }

  return true;
}

void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
  BB = bb;
}

void ScheduleDAGInstrs::finishBlock() {
  // Subclasses should no longer refer to the old block.
  BB = nullptr;
}

void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs) {
  assert(bb == BB && "startBlock should set BB");
  RegionBegin = begin;
  RegionEnd = end;
  NumRegionInstrs = regioninstrs;
}

void ScheduleDAGInstrs::exitRegion() {
  // Nothing to do.
}

void ScheduleDAGInstrs::addSchedBarrierDeps() {
  MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : nullptr;
  ExitSU.setInstr(ExitMI);
  // Add dependencies on the defs and uses of the instruction.
  if (ExitMI) {
    for (const MachineOperand &MO : ExitMI->operands()) {
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
        Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      } else if (TargetRegisterInfo::isVirtualRegister(Reg) && MO.readsReg()) {
        addVRegUseDeps(&ExitSU, ExitMI->getOperandNo(&MO));
      }
    }
  }
  if (!ExitMI || (!ExitMI->isCall() && !ExitMI->isBarrier())) {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    for (const MachineBasicBlock *Succ : BB->successors()) {
      for (const auto &LI : Succ->liveins()) {
        if (!Uses.contains(LI.PhysReg))
          Uses.insert(PhysRegSUOper(&ExitSU, -1, LI.PhysReg));
      }
    }
  }
}

/// MO is an operand of SU's instruction that defines a physical register. Adds
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = MF.getSubtarget();

  // Only use any non-zero latency for real defs/uses, in contrast to
  // "fake" operands added by regalloc.
  const MCInstrDesc *DefMIDesc = &SU->getInstr()->getDesc();
  bool ImplicitPseudoDef = (OperIdx >= DefMIDesc->getNumOperands() &&
                            !DefMIDesc->hasImplicitDefOfPhysReg(MO.getReg()));
  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Uses.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
      SUnit *UseSU = I->SU;
      if (UseSU == SU)
        continue;

      // Adjust the dependence latency using operand def/use information,
      // then allow the target to perform its own adjustments.
      int UseOp = I->OpIdx;
      MachineInstr *RegUse = nullptr;
      SDep Dep;
      if (UseOp < 0)
        Dep = SDep(SU, SDep::Artificial);
      else {
        // Set the hasPhysRegDefs only for physreg defs that have a use within
        // the scheduling region.
        SU->hasPhysRegDefs = true;
        Dep = SDep(SU, SDep::Data, *Alias);
        RegUse = UseSU->getInstr();
      }
      const MCInstrDesc *UseMIDesc =
          (RegUse ? &UseSU->getInstr()->getDesc() : nullptr);
      bool ImplicitPseudoUse =
          (UseMIDesc && UseOp >= ((int)UseMIDesc->getNumOperands()) &&
           !UseMIDesc->hasImplicitUseOfPhysReg(*Alias));
      if (!ImplicitPseudoDef && !ImplicitPseudoUse) {
        Dep.setLatency(SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
                                                        RegUse, UseOp));
        ST.adjustSchedDependency(SU, UseSU, Dep);
      } else
        Dep.setLatency(0);

      UseSU->addPred(Dep);
    }
  }
}

/// Adds register dependencies (data, anti, and output) from this SUnit
/// to following instructions in the same scheduling region that depend on the
/// physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);
  unsigned Reg = MO.getReg();
  // We do not need to track any dependencies for constant registers.
  if (MRI.isConstantPhysReg(Reg))
    return;

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  // there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
    if (!Defs.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
      SUnit *DefSU = I->SU;
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias));
        else {
          SDep Dep(SU, Kind, /*Reg=*/*Alias);
          Dep.setLatency(
              SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
          DefSU->addPred(Dep);
        }
      }
    }
  }

  if (!MO.isDef()) {
    SU->hasPhysRegUses = true;
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    Uses.insert(PhysRegSUOper(SU, OperIdx, Reg));
    if (RemoveKillFlags)
      MO.setIsKill(false);
  } else {
    addPhysRegDataDeps(SU, OperIdx);

    // Clear previous uses and defs of this register and its subregisters.
    for (MCSubRegIterator SubReg(Reg, TRI, true); SubReg.isValid(); ++SubReg) {
      if (Uses.contains(*SubReg))
        Uses.eraseAll(*SubReg);
      if (!MO.isDead())
        Defs.eraseAll(*SubReg);
    }
    if (MO.isDead() && SU->isCall) {
      // Calls will not be reordered because of chain dependencies (see
      // below). Since call operands are dead, calls may continue to be added
      // to the DefList making dependence checking quadratic in the size of
      // the block. Instead, we leave only one call at the back of the
      // DefList.
      Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg);
      Reg2SUnitsMap::iterator B = P.first;
      Reg2SUnitsMap::iterator I = P.second;
      for (bool isBegin = I == B; !isBegin; /* empty */) {
        isBegin = (--I) == B;
        if (!I->SU->isCall)
          break;
        I = Defs.erase(I);
      }
    }

    // Defs are pushed in the order they are visited and never reordered.
    Defs.insert(PhysRegSUOper(SU, OperIdx, Reg));
  }
}

LaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const
{
  unsigned Reg = MO.getReg();
  // No point in tracking lanemasks if we don't have interesting subregisters.
  const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
  if (!RC.HasDisjunctSubRegs)
    return LaneBitmask::getAll();

  unsigned SubReg = MO.getSubReg();
  if (SubReg == 0)
    return RC.getLaneMask();
  return TRI->getSubRegIndexLaneMask(SubReg);
}

/// Adds register output and data dependencies from this SUnit to instructions
/// that occur later in the same scheduling region if they read from or write to
/// the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);
  unsigned Reg = MO.getReg();

  LaneBitmask DefLaneMask;
  LaneBitmask KillLaneMask;
  if (TrackLaneMasks) {
    bool IsKill = MO.getSubReg() == 0 || MO.isUndef();
    DefLaneMask = getLaneMaskForMO(MO);
    // If we have a <read-undef> flag, none of the lane values comes from an
    // earlier instruction.
    KillLaneMask = IsKill ? LaneBitmask::getAll() : DefLaneMask;

    // Clear undef flag, we'll re-add it later once we know which subregister
    // Def is first.
    MO.setIsUndef(false);
  } else {
    DefLaneMask = LaneBitmask::getAll();
    KillLaneMask = LaneBitmask::getAll();
  }

  if (MO.isDead()) {
    assert(CurrentVRegUses.find(Reg) == CurrentVRegUses.end() &&
           "Dead defs should have no uses");
  } else {
    // Add data dependence to all uses we found so far.
    const TargetSubtargetInfo &ST = MF.getSubtarget();
    for (VReg2SUnitOperIdxMultiMap::iterator I = CurrentVRegUses.find(Reg),
         E = CurrentVRegUses.end(); I != E; /*empty*/) {
      LaneBitmask LaneMask = I->LaneMask;
      // Ignore uses of other lanes.
      if ((LaneMask & KillLaneMask).none()) {
        ++I;
        continue;
      }

      if ((LaneMask & DefLaneMask).any()) {
        SUnit *UseSU = I->SU;
        MachineInstr *Use = UseSU->getInstr();
        SDep Dep(SU, SDep::Data, Reg);
        Dep.setLatency(SchedModel.computeOperandLatency(MI, OperIdx, Use,
                                                        I->OperandIndex));
        ST.adjustSchedDependency(SU, UseSU, Dep);
        UseSU->addPred(Dep);
      }

      LaneMask &= ~KillLaneMask;
      // If we found a Def for all lanes of this use, remove it from the list.
      if (LaneMask.any()) {
        I->LaneMask = LaneMask;
        ++I;
      } else
        I = CurrentVRegUses.erase(I);
    }
  }

  // Shortcut: Singly defined vregs do not have output/anti dependencies.
  if (MRI.hasOneDef(Reg))
    return;

  // Add output dependence to the next nearest defs of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  LaneBitmask LaneMask = DefLaneMask;
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for other lanes.
    if ((V2SU.LaneMask & LaneMask).none())
      continue;
    // Add an output dependence.
    SUnit *DefSU = V2SU.SU;
    // Ignore additional defs of the same lanes in one instruction. This can
    // happen because lanemasks are shared for targets with too many
    // subregisters. We also use some representation tricks/hacks where we
    // add super-register defs/uses, to imply that although we only access
    // parts of the register, we care about the full one.
    if (DefSU == SU)
      continue;
    SDep Dep(SU, SDep::Output, Reg);
    Dep.setLatency(
        SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
    DefSU->addPred(Dep);

    // Update current definition. This can get tricky if the def was about a
    // bigger lanemask before. We then have to shrink it and create a new
    // VReg2SUnit for the non-overlapping part.
    LaneBitmask OverlapMask = V2SU.LaneMask & LaneMask;
    LaneBitmask NonOverlapMask = V2SU.LaneMask & ~LaneMask;
    V2SU.SU = SU;
    V2SU.LaneMask = OverlapMask;
    if (NonOverlapMask.any())
      CurrentVRegDefs.insert(VReg2SUnit(Reg, NonOverlapMask, DefSU));
  }
  // If there was no CurrentVRegDefs entry for some lanes yet, create one.
  if (LaneMask.any())
    CurrentVRegDefs.insert(VReg2SUnit(Reg, LaneMask, SU));
}

/// Adds a register data dependency if the instruction that defines the
/// virtual register used at OperIdx is mapped to an SUnit. Add a register
/// antidependency from this SUnit to instructions that occur later in the same
/// scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  const MachineOperand &MO = MI->getOperand(OperIdx);
  unsigned Reg = MO.getReg();

  // Remember the use. Data dependencies will be added when we find the def.
  LaneBitmask LaneMask = TrackLaneMasks ? getLaneMaskForMO(MO)
                                        : LaneBitmask::getAll();
  CurrentVRegUses.insert(VReg2SUnitOperIdx(Reg, LaneMask, OperIdx, SU));

  // Add antidependences to the following defs of the vreg.
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for unrelated lanes.
    LaneBitmask PrevDefLaneMask = V2SU.LaneMask;
    if ((PrevDefLaneMask & LaneMask).none())
      continue;
    if (V2SU.SU == SU)
      continue;

    V2SU.SU->addPred(SDep(SU, SDep::Anti, Reg));
  }
}

/// Returns true if MI is an instruction we are unable to reason about
/// (like a call or something with unmodeled side effects).
static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) {
  return MI->isCall() || MI->hasUnmodeledSideEffects() ||
         (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad(AA));
}

void ScheduleDAGInstrs::addChainDependency(SUnit *SUa, SUnit *SUb,
                                           unsigned Latency) {
  if (SUa->getInstr()->mayAlias(AAForDep, *SUb->getInstr(), UseTBAA)) {
    SDep Dep(SUa, SDep::MayAliasMem);
    Dep.setLatency(Latency);
    SUb->addPred(Dep);
  }
}

/// Creates an SUnit for each real instruction, numbered in top-down
/// topological order. The instruction order A < B implies that no edge exists
/// from B to A.
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
/// instead of pointers.
///
/// MachineScheduler relies on initSUnits numbering the nodes by their order in
/// the original instruction list.
void ScheduleDAGInstrs::initSUnits() {
  // We'll be allocating one SUnit for each real instruction in the region,
  // which is contained within a basic block.
  SUnits.reserve(NumRegionInstrs);

  for (MachineInstr &MI : make_range(RegionBegin, RegionEnd)) {
    if (MI.isDebugInstr())
      continue;

    SUnit *SU = newSUnit(&MI);
    MISUnitMap[&MI] = SU;

    SU->isCall = MI.isCall();
    SU->isCommutable = MI.isCommutable();

    // Assign the Latency field of SU using target-provided information.
    SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());

    // If this SUnit uses a reserved or unbuffered resource, mark it as such.
    //
    // Reserved resources block an instruction from issuing and stall the
    // entire pipeline. These are identified by BufferSize=0.
    //
    // Unbuffered resources prevent execution of subsequent instructions that
    // require the same resources. This is used for in-order execution pipelines
    // within an out-of-order core. These are identified by BufferSize=1.
    if (SchedModel.hasInstrSchedModel()) {
      const MCSchedClassDesc *SC = getSchedClass(SU);
      for (const MCWriteProcResEntry &PRE :
           make_range(SchedModel.getWriteProcResBegin(SC),
                      SchedModel.getWriteProcResEnd(SC))) {
        switch (SchedModel.getProcResource(PRE.ProcResourceIdx)->BufferSize) {
        case 0:
          SU->hasReservedResource = true;
          break;
        case 1:
          SU->isUnbuffered = true;
          break;
        default:
          break;
        }
      }
    }
  }
}

class ScheduleDAGInstrs::Value2SUsMap : public MapVector<ValueType, SUList> {
  /// Current total number of SUs in map.
  unsigned NumNodes = 0;

  /// 1 for loads, 0 for stores. (see comment in SUList)
  unsigned TrueMemOrderLatency;

public:
  Value2SUsMap(unsigned lat = 0) : TrueMemOrderLatency(lat) {}

  /// To keep NumNodes up to date, insert() is used instead of
  /// this operator w/ push_back().
  ValueType &operator[](const SUList &Key) {
    llvm_unreachable("Don't use. Use insert() instead.");
  }

  /// Adds SU to the SUList of V. If Map grows huge, reduce its size by calling
  /// reduce().
  void inline insert(SUnit *SU, ValueType V) {
    MapVector::operator[](V).push_back(SU);
    NumNodes++;
  }

  /// Clears the list of SUs mapped to V.
  void inline clearList(ValueType V) {
    iterator Itr = find(V);
    if (Itr != end()) {
      assert(NumNodes >= Itr->second.size());
      NumNodes -= Itr->second.size();

      Itr->second.clear();
    }
  }

  /// Clears map from all contents.
  void clear() {
    MapVector<ValueType, SUList>::clear();
    NumNodes = 0;
  }

  unsigned inline size() const { return NumNodes; }

  /// Counts the number of SUs in this map after a reduction.
  void reComputeSize() {
    NumNodes = 0;
    for (auto &I : *this)
      NumNodes += I.second.size();
  }

  unsigned inline getTrueMemOrderLatency() const {
    return TrueMemOrderLatency;
  }

  void dump();
};

void ScheduleDAGInstrs::addChainDependencies(SUnit *SU,
                                             Value2SUsMap &Val2SUsMap) {
  for (auto &I : Val2SUsMap)
    addChainDependencies(SU, I.second,
                         Val2SUsMap.getTrueMemOrderLatency());
}

void ScheduleDAGInstrs::addChainDependencies(SUnit *SU,
                                             Value2SUsMap &Val2SUsMap,
                                             ValueType V) {
  Value2SUsMap::iterator Itr = Val2SUsMap.find(V);
  if (Itr != Val2SUsMap.end())
    addChainDependencies(SU, Itr->second,
                         Val2SUsMap.getTrueMemOrderLatency());
}

void ScheduleDAGInstrs::addBarrierChain(Value2SUsMap &map) {
  assert(BarrierChain != nullptr);

  for (auto &I : map) {
    SUList &sus = I.second;
    for (auto *SU : sus)
      SU->addPredBarrier(BarrierChain);
  }
  map.clear();
}

void ScheduleDAGInstrs::insertBarrierChain(Value2SUsMap &map) {
  assert(BarrierChain != nullptr);

  // Go through all lists of SUs.
  for (Value2SUsMap::iterator I = map.begin(), EE = map.end(); I != EE;) {
    Value2SUsMap::iterator CurrItr = I++;
    SUList &sus = CurrItr->second;
    SUList::iterator SUItr = sus.begin(), SUEE = sus.end();
    for (; SUItr != SUEE; ++SUItr) {
      // Stop on BarrierChain or any instruction above it.
      if ((*SUItr)->NodeNum <= BarrierChain->NodeNum)
        break;

      (*SUItr)->addPredBarrier(BarrierChain);
    }

    // Also remove the BarrierChain from the list if present.
    if (SUItr != SUEE && *SUItr == BarrierChain)
      SUItr++;

    // Remove all SUs that are now successors of BarrierChain.
    if (SUItr != sus.begin())
      sus.erase(sus.begin(), SUItr);
  }

  // Remove all entries with empty su lists.
  map.remove_if([&](std::pair<ValueType, SUList> &mapEntry) {
    return (mapEntry.second.empty()); });

  // Recompute the size of the map (NumNodes).
  map.reComputeSize();
}

void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
                                        RegPressureTracker *RPTracker,
                                        PressureDiffs *PDiffs,
                                        LiveIntervals *LIS,
                                        bool TrackLaneMasks) {
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
                                                       : ST.useAA();
  AAForDep = UseAA ? AA : nullptr;

  BarrierChain = nullptr;
  SUnit *FPBarrierChain = nullptr;

  this->TrackLaneMasks = TrackLaneMasks;
  MISUnitMap.clear();
  ScheduleDAG::clearDAG();

  // Create an SUnit for each real instruction.
  initSUnits();

  if (PDiffs)
    PDiffs->init(SUnits.size());

  // We build scheduling units by walking a block's instruction list
  // from bottom to top.

  // Each MI's memory operand(s) is analyzed to a list of underlying
  // objects. The SU is then inserted in the SUList(s) mapped from the
  // Value(s). Each Value thus gets mapped to lists of SUs depending
  // on it, stores and loads kept separately. Two SUs are trivially
  // non-aliasing if they both depend on only identified Values and do
  // not share any common Value.
  Value2SUsMap Stores, Loads(1 /*TrueMemOrderLatency*/);

  // Certain memory accesses are known to not alias any SU in Stores
  // or Loads, and have therefore their own 'NonAlias'
  // domain. E.g. spill / reload instructions never alias LLVM IR
  // Values. It would be nice to assume that this type of memory
  // accesses always have a proper memory operand modelling, and are
  // therefore never unanalyzable, but this is conservatively not
  // done.
  Value2SUsMap NonAliasStores, NonAliasLoads(1 /*TrueMemOrderLatency*/);

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = nullptr;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(CurrentVRegDefs.empty() && "nobody else should use CurrentVRegDefs");
  assert(CurrentVRegUses.empty() && "nobody else should use CurrentVRegUses");
  unsigned NumVirtRegs = MRI.getNumVirtRegs();
  CurrentVRegDefs.setUniverse(NumVirtRegs);
  CurrentVRegUses.setUniverse(NumVirtRegs);

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  addSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
  MachineInstr *DbgMI = nullptr;
  for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
       MII != MIE; --MII) {
    MachineInstr &MI = *std::prev(MII);
    if (DbgMI) {
      DbgValues.push_back(std::make_pair(DbgMI, &MI));
      DbgMI = nullptr;
    }

    if (MI.isDebugValue()) {
      DbgMI = &MI;
      continue;
    }
    if (MI.isDebugLabel())
      continue;

    SUnit *SU = MISUnitMap[&MI];
    assert(SU && "No SUnit mapped to this MI");

    if (RPTracker) {
      RegisterOperands RegOpers;
      RegOpers.collect(MI, *TRI, MRI, TrackLaneMasks, false);
      if (TrackLaneMasks) {
        SlotIndex SlotIdx = LIS->getInstructionIndex(MI);
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx);
      }
      if (PDiffs != nullptr)
        PDiffs->addInstruction(SU->NodeNum, RegOpers, MRI);

      if (RPTracker->getPos() == RegionEnd || &*RPTracker->getPos() != &MI)
        RPTracker->recedeSkipDebugValues();
      assert(&*RPTracker->getPos() == &MI && "RPTracker in sync");
      RPTracker->recede(RegOpers);
    }

    assert(
        (CanHandleTerminators || (!MI.isTerminator() && !MI.isPosition())) &&
        "Cannot schedule terminators or labels!");

    // Add register-based dependencies (data, anti, and output).
    // For some instructions (calls, returns, inline-asm, etc.) there can
    // be explicit uses and implicit defs, in which case the use will appear
    // on the operand list before the def. Do two passes over the operand
    // list to make sure that defs are processed before any uses.
    bool HasVRegDef = false;
    for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI.getOperand(j);
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
        addPhysRegDeps(SU, j);
      } else if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        HasVRegDef = true;
        addVRegDefDeps(SU, j);
      }
    }
    // Now process all uses.
    for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI.getOperand(j);
      // Only look at use operands.
      // We do not need to check for MO.readsReg() here because subsequent
      // subregister defs will get output dependence edges and need no
      // additional use dependencies.
      if (!MO.isReg() || !MO.isUse())
        continue;
      unsigned Reg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
        addPhysRegDeps(SU, j);
      } else if (TargetRegisterInfo::isVirtualRegister(Reg) && MO.readsReg()) {
        addVRegUseDeps(SU, j);
      }
    }

    // If we haven't seen any uses in this scheduling region, create a
    // dependence edge to ExitSU to model the live-out latency. This is required
    // for vreg defs with no in-region use, and prefetches with no vreg def.
    //
    // FIXME: NumDataSuccs would be more precise than NumSuccs here. This
    // check currently relies on being called before adding chain deps.
    if (SU->NumSuccs == 0 && SU->Latency > 1 && (HasVRegDef || MI.mayLoad())) {
      SDep Dep(SU, SDep::Artificial);
      Dep.setLatency(SU->Latency - 1);
      ExitSU.addPred(Dep);
    }

    // Add memory dependencies (Note: isStoreToStackSlot and
    // isLoadFromStackSlot are not usable after stack slots are lowered to
    // actual addresses).

    // This is a barrier event that acts as a pivotal node in the DAG.
    if (isGlobalMemoryObject(AA, &MI)) {

      // Become the barrier chain.
      if (BarrierChain)
        BarrierChain->addPredBarrier(SU);
      BarrierChain = SU;

      LLVM_DEBUG(dbgs() << "Global memory object and new barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);

      // Add dependencies against everything below it and clear maps.
      addBarrierChain(Stores);
      addBarrierChain(Loads);
      addBarrierChain(NonAliasStores);
      addBarrierChain(NonAliasLoads);

      // Add dependency against previous FP barrier and reset FP barrier.
      if (FPBarrierChain)
        FPBarrierChain->addPredBarrier(BarrierChain);
      FPBarrierChain = BarrierChain;

      continue;
    }

    // Instructions that may raise FP exceptions depend on each other.
    if (MI.mayRaiseFPException()) {
      if (FPBarrierChain)
        FPBarrierChain->addPredBarrier(SU);
      FPBarrierChain = SU;
    }

    // If it's not a store or a variant load, we're done.
    if (!MI.mayStore() &&
        !(MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA)))
      continue;

    // Always add a dependency edge to BarrierChain if present.
    if (BarrierChain)
      BarrierChain->addPredBarrier(SU);

    // Find the underlying objects for MI. The Objs vector is either
    // empty, or filled with the Values of memory locations which this
    // SU depends on.
    UnderlyingObjectsVector Objs;
    bool ObjsFound = getUnderlyingObjectsForInstr(&MI, MFI, Objs,
                                                  MF.getDataLayout());

    if (MI.mayStore()) {
      if (!ObjsFound) {
        // An unknown store depends on all stores and loads.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);
        addChainDependencies(SU, Loads);
        addChainDependencies(SU, NonAliasLoads);

        // Map this store to 'UnknownValue'.
        Stores.insert(SU, UnknownValue);
      } else {
        // Add precise dependencies against all previously seen memory
        // accesses mapped to the same Value(s).
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add dependencies to previous stores and loads mapped to V.
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);
          addChainDependencies(SU, (ThisMayAlias ? Loads : NonAliasLoads), V);
        }
        // Update the store map after all chains have been added to avoid
        // adding a self-loop edge if multiple underlying objects are present.
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Map this store to V.
          (ThisMayAlias ? Stores : NonAliasStores).insert(SU, V);
        }
        // The store may have dependencies to unanalyzable loads and
        // stores.
        addChainDependencies(SU, Loads, UnknownValue);
        addChainDependencies(SU, Stores, UnknownValue);
      }
    } else { // SU is a load.
      if (!ObjsFound) {
        // An unknown load depends on all stores.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);

        Loads.insert(SU, UnknownValue);
      } else {
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add precise dependencies against all previously seen stores
          // mapping to the same Value(s).
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);

          // Map this load to V.
          (ThisMayAlias ? Loads : NonAliasLoads).insert(SU, V);
        }
        // The load may have dependencies to unanalyzable stores.
        addChainDependencies(SU, Stores, UnknownValue);
      }
    }

    // Reduce maps if they grow huge.
    if (Stores.size() + Loads.size() >= HugeRegion) {
      LLVM_DEBUG(dbgs() << "Reducing Stores and Loads maps.\n";);
      reduceHugeMemNodeMaps(Stores, Loads, getReductionSize());
    }
    if (NonAliasStores.size() + NonAliasLoads.size() >= HugeRegion) {
      LLVM_DEBUG(
          dbgs() << "Reducing NonAliasStores and NonAliasLoads maps.\n";);
      reduceHugeMemNodeMaps(NonAliasStores, NonAliasLoads, getReductionSize());
    }
  }

  if (DbgMI)
    FirstDbgValue = DbgMI;

  Defs.clear();
  Uses.clear();
  CurrentVRegDefs.clear();
  CurrentVRegUses.clear();

  Topo.MarkDirty();
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const PseudoSourceValue *PSV) {
  PSV->printCustom(OS);
  return OS;
}

void ScheduleDAGInstrs::Value2SUsMap::dump() {
  for (auto &Itr : *this) {
    if (Itr.first.is<const Value *>()) {
      const Value *V = Itr.first.get<const Value *>();
      if (isa<UndefValue>(V))
        dbgs() << "Unknown";
      else
        V->printAsOperand(dbgs());
    } else if (Itr.first.is<const PseudoSourceValue *>())
      dbgs() << Itr.first.get<const PseudoSourceValue *>();
    else
      llvm_unreachable("Unknown Value type.");

    dbgs() << " : ";
    dumpSUList(Itr.second);
  }
}

void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
                                              Value2SUsMap &loads, unsigned N) {
  LLVM_DEBUG(dbgs() << "Before reduction:\nStoring SUnits:\n"; stores.dump();
             dbgs() << "Loading SUnits:\n"; loads.dump());

  // Insert all SUs' NodeNums into a vector and sort it.
  std::vector<unsigned> NodeNums;
  NodeNums.reserve(stores.size() + loads.size());
  for (auto &I : stores)
    for (auto *SU : I.second)
      NodeNums.push_back(SU->NodeNum);
  for (auto &I : loads)
    for (auto *SU : I.second)
      NodeNums.push_back(SU->NodeNum);
  llvm::sort(NodeNums);

  // The N last elements in NodeNums will be removed, and the SU with
  // the lowest NodeNum of them will become the new BarrierChain to
  // let the not yet seen SUs have a dependency to the removed SUs.
  assert(N <= NodeNums.size());
  SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)];
  if (BarrierChain) {
    // The aliasing and non-aliasing maps reduce independently of each
    // other, but share a common BarrierChain. Check if the
    // newBarrierChain is above the former one. If it is not, it may
    // introduce a loop to use newBarrierChain, so keep the old one.
    if (newBarrierChain->NodeNum < BarrierChain->NodeNum) {
      BarrierChain->addPredBarrier(newBarrierChain);
      BarrierChain = newBarrierChain;
      LLVM_DEBUG(dbgs() << "Inserting new barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);
    } else
      LLVM_DEBUG(dbgs() << "Keeping old barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);
  } else
    BarrierChain = newBarrierChain;

  insertBarrierChain(stores);
  insertBarrierChain(loads);

  LLVM_DEBUG(dbgs() << "After reduction:\nStoring SUnits:\n"; stores.dump();
             dbgs() << "Loading SUnits:\n"; loads.dump());
}

static void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs,
                        MachineInstr &MI, bool addToLiveRegs) {
  for (MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || !MO.readsReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;

    // Things that are available after the instruction are killed by it.
    bool IsKill = LiveRegs.available(MRI, Reg);
    MO.setIsKill(IsKill);
    if (addToLiveRegs)
      LiveRegs.addReg(Reg);
  }
}

void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) {
  LLVM_DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n');

  LiveRegs.init(*TRI);
  LiveRegs.addLiveOuts(MBB);

  // Examine block from end to start...
  for (MachineInstr &MI : make_range(MBB.rbegin(), MBB.rend())) {
    if (MI.isDebugInstr())
      continue;

    // Update liveness. Registers that are defined but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
      const MachineOperand &MO = *O;
      if (MO.isReg()) {
        if (!MO.isDef())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        LiveRegs.removeReg(Reg);
      } else if (MO.isRegMask()) {
        LiveRegs.removeRegsInMask(MO);
      }
    }

    // If there is a bundle header fix it up first.
    if (!MI.isBundled()) {
      toggleKills(MRI, LiveRegs, MI, true);
    } else {
      MachineBasicBlock::instr_iterator First = MI.getIterator();
      if (MI.isBundle()) {
        toggleKills(MRI, LiveRegs, MI, false);
        ++First;
      }
      // Some targets make the (questionable) assumption that the instructions
      // inside the bundle are ordered and consequently only the last use of
      // a register inside the bundle can kill it.
      MachineBasicBlock::instr_iterator I = std::next(First);
      while (I->isBundledWithSucc())
        ++I;
      do {
        if (!I->isDebugInstr())
          toggleKills(MRI, LiveRegs, *I, true);
        --I;
      } while (I != First);
    }
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit &SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dumpNodeName(SU);
  dbgs() << ": ";
  SU.getInstr()->dump();
#endif
}

void ScheduleDAGInstrs::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits)
    dumpNodeAll(SU);
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss, /*SkipOpers=*/true);
  return oss.str();
}

/// Return the basic block label. It is not necessarily unique because a block
/// contains multiple scheduling regions. But it is fine for visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
  return "dag." + BB->getFullName();
}

bool ScheduleDAGInstrs::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGInstrs::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPredQueued(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//

namespace llvm {

/// Internal state used to compute SchedDFSResult.
class SchedDFSImpl {
  SchedDFSResult &R;

  /// Join DAG nodes into equivalence classes by their subtree.
  IntEqClasses SubtreeClasses;
  /// List PredSU, SuccSU pairs that represent data edges between subtrees.
  std::vector<std::pair<const SUnit *, const SUnit *>> ConnectionPairs;

  struct RootData {
    unsigned NodeID;
    unsigned ParentNodeID;      ///< Parent node (member of the parent subtree).
    unsigned SubInstrCount = 0; ///< Instr count in this tree only, not
                                /// children.

    RootData(unsigned id): NodeID(id),
                           ParentNodeID(SchedDFSResult::InvalidSubtreeID) {}

    unsigned getSparseSetIndex() const { return NodeID; }
  };

  SparseSet<RootData> RootSet;

public:
  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
    RootSet.setUniverse(R.DFSNodeData.size());
  }

  /// Returns true if this node has been visited by the DFS traversal.
  ///
  /// During visitPostorderNode the Node's SubtreeID is assigned to the Node
  /// ID. Later, SubtreeID is updated but remains valid.
  bool isVisited(const SUnit *SU) const {
    return R.DFSNodeData[SU->NodeNum].SubtreeID
        != SchedDFSResult::InvalidSubtreeID;
  }

  /// Initializes this node's instruction count. We don't need to flag the node
  /// visited until visitPostorder because the DAG cannot have cycles.
  void visitPreorder(const SUnit *SU) {
    R.DFSNodeData[SU->NodeNum].InstrCount =
        SU->getInstr()->isTransient() ? 0 : 1;
  }

  /// Called once for each node after all predecessors are visited. Revisit this
  /// node's predecessors and potentially join them now that we know the ILP of
  /// the other predecessors.
  void visitPostorderNode(const SUnit *SU) {
    // Mark this node as the root of a subtree. It may be joined with its
    // successors later.
    R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum;
    RootData RData(SU->NodeNum);
    RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1;

    // If any predecessors are still in their own subtree, they either cannot be
    // joined or are large enough to remain separate. If this parent node's
    // total instruction count is not greater than a child subtree by at least
    // the subtree limit, then try to join it now since splitting subtrees is
    // only useful if multiple high-pressure paths are possible.
    unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount;
    for (const SDep &PredDep : SU->Preds) {
      if (PredDep.getKind() != SDep::Data)
        continue;
      unsigned PredNum = PredDep.getSUnit()->NodeNum;
      if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit)
        joinPredSubtree(PredDep, SU, /*CheckLimit=*/false);

      // Either link or merge the TreeData entry from the child to the parent.
      if (R.DFSNodeData[PredNum].SubtreeID == PredNum) {
        // If the predecessor's parent is invalid, this is a tree edge and the
        // current node is the parent.
        if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID)
          RootSet[PredNum].ParentNodeID = SU->NodeNum;
      } else if (RootSet.count(PredNum)) {
        // The predecessor is not a root, but is still in the root set. This
        // must be the new parent that it was just joined to. Note that
        // RootSet[PredNum].ParentNodeID may either be invalid or may still be
        // set to the original parent.
        RData.SubInstrCount += RootSet[PredNum].SubInstrCount;
        RootSet.erase(PredNum);
      }
    }
    RootSet[SU->NodeNum] = RData;
  }

  /// Called once for each tree edge after calling visitPostOrderNode on
  /// the predecessor. Increment the parent node's instruction count and
  /// preemptively join this subtree to its parent's if it is small enough.
  void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) {
    R.DFSNodeData[Succ->NodeNum].InstrCount
        += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount;
    joinPredSubtree(PredDep, Succ);
  }

  /// Adds a connection for cross edges.
  void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) {
    ConnectionPairs.push_back(std::make_pair(PredDep.getSUnit(), Succ));
  }

  /// Sets each node's subtree ID to the representative ID and records
  /// connections between trees.
  void finalize() {
    SubtreeClasses.compress();
    R.DFSTreeData.resize(SubtreeClasses.getNumClasses());
    assert(SubtreeClasses.getNumClasses() == RootSet.size()
           && "number of roots should match trees");
    for (const RootData &Root : RootSet) {
      unsigned TreeID = SubtreeClasses[Root.NodeID];
      if (Root.ParentNodeID != SchedDFSResult::InvalidSubtreeID)
        R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[Root.ParentNodeID];
      R.DFSTreeData[TreeID].SubInstrCount = Root.SubInstrCount;
      // Note that SubInstrCount may be greater than InstrCount if we joined
      // subtrees across a cross edge. InstrCount will be attributed to the
      // original parent, while SubInstrCount will be attributed to the joined
      // parent.
    }
    R.SubtreeConnections.resize(SubtreeClasses.getNumClasses());
    R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses());
    LLVM_DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
    for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) {
      R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx];
      LLVM_DEBUG(dbgs() << " SU(" << Idx << ") in tree "
                        << R.DFSNodeData[Idx].SubtreeID << '\n');
    }
    for (const std::pair<const SUnit *, const SUnit *> &P : ConnectionPairs) {
      unsigned PredTree = SubtreeClasses[P.first->NodeNum];
      unsigned SuccTree = SubtreeClasses[P.second->NodeNum];
      if (PredTree == SuccTree)
        continue;
      unsigned Depth = P.first->getDepth();
      addConnection(PredTree, SuccTree, Depth);
      addConnection(SuccTree, PredTree, Depth);
    }
  }

protected:
  /// Joins the predecessor subtree with the successor that is its DFS parent.
  /// Applies some heuristics before joining.
  bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ,
                       bool CheckLimit = true) {
    assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges");

    // Check if the predecessor is already joined.
    const SUnit *PredSU = PredDep.getSUnit();
    unsigned PredNum = PredSU->NodeNum;
    if (R.DFSNodeData[PredNum].SubtreeID != PredNum)
      return false;

    // Four is the magic number of successors before a node is considered a
    // pinch point.
    unsigned NumDataSucs = 0;
    for (const SDep &SuccDep : PredSU->Succs) {
      if (SuccDep.getKind() == SDep::Data) {
        if (++NumDataSucs >= 4)
          return false;
      }
    }
    if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit)
      return false;
    R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum;
    SubtreeClasses.join(Succ->NodeNum, PredNum);
    return true;
  }

  /// Called by finalize() to record a connection between trees.
  void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) {
    if (!Depth)
      return;

    do {
      SmallVectorImpl<SchedDFSResult::Connection> &Connections =
          R.SubtreeConnections[FromTree];
      for (SchedDFSResult::Connection &C : Connections) {
        if (C.TreeID == ToTree) {
          C.Level = std::max(C.Level, Depth);
          return;
        }
      }
      Connections.push_back(SchedDFSResult::Connection(ToTree, Depth));
      FromTree = R.DFSTreeData[FromTree].ParentTreeID;
    } while (FromTree != SchedDFSResult::InvalidSubtreeID);
  }
};

} // end namespace llvm

namespace {

/// Manage the stack used by a reverse depth-first search over the DAG.
class SchedDAGReverseDFS {
  std::vector<std::pair<const SUnit *, SUnit::const_pred_iterator>> DFSStack;

public:
  bool isComplete() const { return DFSStack.empty(); }

  void follow(const SUnit *SU) {
    DFSStack.push_back(std::make_pair(SU, SU->Preds.begin()));
  }
  void advance() { ++DFSStack.back().second; }

  const SDep *backtrack() {
    DFSStack.pop_back();
    return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second);
  }

  const SUnit *getCurr() const { return DFSStack.back().first; }

  SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }

  SUnit::const_pred_iterator getPredEnd() const {
    return getCurr()->Preds.end();
  }
};

} // end anonymous namespace

static bool hasDataSucc(const SUnit *SU) {
  for (const SDep &SuccDep : SU->Succs) {
    if (SuccDep.getKind() == SDep::Data &&
        !SuccDep.getSUnit()->isBoundaryNode())
      return true;
  }
  return false;
}

/// Computes an ILP metric for all nodes in the subDAG reachable via depth-first
/// search from this root.
void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
  if (!IsBottomUp)
    llvm_unreachable("Top-down ILP metric is unimplemented");

  SchedDFSImpl Impl(*this);
  for (const SUnit &SU : SUnits) {
    if (Impl.isVisited(&SU) || hasDataSucc(&SU))
      continue;

    SchedDAGReverseDFS DFS;
    Impl.visitPreorder(&SU);
    DFS.follow(&SU);
    while (true) {
      // Traverse the leftmost path as far as possible.
      while (DFS.getPred() != DFS.getPredEnd()) {
        const SDep &PredDep = *DFS.getPred();
        DFS.advance();
        // Ignore non-data edges.
        if (PredDep.getKind() != SDep::Data
            || PredDep.getSUnit()->isBoundaryNode()) {
          continue;
        }
        // An already visited edge is a cross edge, assuming an acyclic DAG.
        if (Impl.isVisited(PredDep.getSUnit())) {
          Impl.visitCrossEdge(PredDep, DFS.getCurr());
          continue;
        }
        Impl.visitPreorder(PredDep.getSUnit());
        DFS.follow(PredDep.getSUnit());
      }
      // Visit the top of the stack in postorder and backtrack.
      const SUnit *Child = DFS.getCurr();
      const SDep *PredDep = DFS.backtrack();
      Impl.visitPostorderNode(Child);
      if (PredDep)
        Impl.visitPostorderEdge(*PredDep, DFS.getCurr());
      if (DFS.isComplete())
        break;
    }
  }
  Impl.finalize();
}

/// The root of the given SubtreeID was just scheduled. For all subtrees
/// connected to this tree, record the depth of the connection so that the
/// nearest connected subtrees can be prioritized.
void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
  for (const Connection &C : SubtreeConnections[SubtreeID]) {
    SubtreeConnectLevels[C.TreeID] =
        std::max(SubtreeConnectLevels[C.TreeID], C.Level);
    LLVM_DEBUG(dbgs() << " Tree: " << C.TreeID << " @"
                      << SubtreeConnectLevels[C.TreeID] << '\n');
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ILPValue::print(raw_ostream &OS) const {
  OS << InstrCount << " / " << Length << " = ";
  if (!Length)
    OS << "BADILP";
  else
    OS << format("%g", ((double)InstrCount / Length));
}

LLVM_DUMP_METHOD void ILPValue::dump() const {
  dbgs() << *this << '\n';
}

namespace llvm {

LLVM_DUMP_METHOD
raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
  Val.print(OS);
  return OS;
}

} // end namespace llvm

#endif