1 //===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file This implements the ScheduleDAGInstrs class, which implements 10 /// re-scheduling of MachineInstrs. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/CodeGen/ScheduleDAGInstrs.h" 15 #include "llvm/ADT/IntEqClasses.h" 16 #include "llvm/ADT/MapVector.h" 17 #include "llvm/ADT/SmallPtrSet.h" 18 #include "llvm/ADT/SmallVector.h" 19 #include "llvm/ADT/SparseSet.h" 20 #include "llvm/ADT/iterator_range.h" 21 #include "llvm/Analysis/AliasAnalysis.h" 22 #include "llvm/Analysis/ValueTracking.h" 23 #include "llvm/CodeGen/LiveIntervals.h" 24 #include "llvm/CodeGen/LivePhysRegs.h" 25 #include "llvm/CodeGen/MachineBasicBlock.h" 26 #include "llvm/CodeGen/MachineFrameInfo.h" 27 #include "llvm/CodeGen/MachineFunction.h" 28 #include "llvm/CodeGen/MachineInstr.h" 29 #include "llvm/CodeGen/MachineInstrBundle.h" 30 #include "llvm/CodeGen/MachineMemOperand.h" 31 #include "llvm/CodeGen/MachineOperand.h" 32 #include "llvm/CodeGen/MachineRegisterInfo.h" 33 #include "llvm/CodeGen/PseudoSourceValue.h" 34 #include "llvm/CodeGen/RegisterPressure.h" 35 #include "llvm/CodeGen/ScheduleDAG.h" 36 #include "llvm/CodeGen/ScheduleDFS.h" 37 #include "llvm/CodeGen/SlotIndexes.h" 38 #include "llvm/CodeGen/TargetRegisterInfo.h" 39 #include "llvm/CodeGen/TargetSubtargetInfo.h" 40 #include "llvm/Config/llvm-config.h" 41 #include "llvm/IR/Constants.h" 42 #include "llvm/IR/Function.h" 43 #include "llvm/IR/Instruction.h" 44 #include "llvm/IR/Instructions.h" 45 #include "llvm/IR/Operator.h" 46 #include "llvm/IR/Type.h" 47 #include "llvm/IR/Value.h" 48 #include "llvm/MC/LaneBitmask.h" 49 #include "llvm/MC/MCRegisterInfo.h" 50 #include "llvm/Support/Casting.h" 51 #include "llvm/Support/CommandLine.h" 52 #include "llvm/Support/Compiler.h" 53 #include "llvm/Support/Debug.h" 54 #include "llvm/Support/ErrorHandling.h" 55 #include "llvm/Support/Format.h" 56 #include "llvm/Support/raw_ostream.h" 57 #include <algorithm> 58 #include <cassert> 59 #include <iterator> 60 #include <string> 61 #include <utility> 62 #include <vector> 63 64 using namespace llvm; 65 66 #define DEBUG_TYPE "machine-scheduler" 67 68 static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden, 69 cl::ZeroOrMore, cl::init(false), 70 cl::desc("Enable use of AA during MI DAG construction")); 71 72 static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, 73 cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction")); 74 75 // Note: the two options below might be used in tuning compile time vs 76 // output quality. Setting HugeRegion so large that it will never be 77 // reached means best-effort, but may be slow. 78 79 // When Stores and Loads maps (or NonAliasStores and NonAliasLoads) 80 // together hold this many SUs, a reduction of maps will be done. 
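// For example, with the default limit of 1000 below and no explicit
// -dag-maps-reduction-size, getReductionSize() returns 500, so each
// reduction folds roughly half of the currently mapped SUs behind the
// barrier chain and erases them from the maps.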
81 static cl::opt<unsigned> HugeRegion("dag-maps-huge-region", cl::Hidden, 82 cl::init(1000), cl::desc("The limit to use while constructing the DAG " 83 "prior to scheduling, at which point a trade-off " 84 "is made to avoid excessive compile time.")); 85 86 static cl::opt<unsigned> ReductionSize( 87 "dag-maps-reduction-size", cl::Hidden, 88 cl::desc("A huge scheduling region will have maps reduced by this many " 89 "nodes at a time. Defaults to HugeRegion / 2.")); 90 91 static unsigned getReductionSize() { 92 // Always reduce a huge region with half of the elements, except 93 // when user sets this number explicitly. 94 if (ReductionSize.getNumOccurrences() == 0) 95 return HugeRegion / 2; 96 return ReductionSize; 97 } 98 99 static void dumpSUList(ScheduleDAGInstrs::SUList &L) { 100 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 101 dbgs() << "{ "; 102 for (const SUnit *su : L) { 103 dbgs() << "SU(" << su->NodeNum << ")"; 104 if (su != L.back()) 105 dbgs() << ", "; 106 } 107 dbgs() << "}\n"; 108 #endif 109 } 110 111 ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf, 112 const MachineLoopInfo *mli, 113 bool RemoveKillFlags) 114 : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()), 115 RemoveKillFlags(RemoveKillFlags), 116 UnknownValue(UndefValue::get( 117 Type::getVoidTy(mf.getFunction().getContext()))), Topo(SUnits, &ExitSU) { 118 DbgValues.clear(); 119 120 const TargetSubtargetInfo &ST = mf.getSubtarget(); 121 SchedModel.init(&ST); 122 } 123 124 /// If this machine instr has memory reference information and it can be 125 /// tracked to a normal reference to a known object, return the Value 126 /// for that object. This function returns false the memory location is 127 /// unknown or may alias anything. 128 static bool getUnderlyingObjectsForInstr(const MachineInstr *MI, 129 const MachineFrameInfo &MFI, 130 UnderlyingObjectsVector &Objects, 131 const DataLayout &DL) { 132 auto allMMOsOkay = [&]() { 133 for (const MachineMemOperand *MMO : MI->memoperands()) { 134 // TODO: Figure out whether isAtomic is really necessary (see D57601). 135 if (MMO->isVolatile() || MMO->isAtomic()) 136 return false; 137 138 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) { 139 // Function that contain tail calls don't have unique PseudoSourceValue 140 // objects. Two PseudoSourceValues might refer to the same or 141 // overlapping locations. The client code calling this function assumes 142 // this is not the case. So return a conservative answer of no known 143 // object. 144 if (MFI.hasTailCall()) 145 return false; 146 147 // For now, ignore PseudoSourceValues which may alias LLVM IR values 148 // because the code that uses this function has no way to cope with 149 // such aliases. 150 if (PSV->isAliased(&MFI)) 151 return false; 152 153 bool MayAlias = PSV->mayAlias(&MFI); 154 Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias)); 155 } else if (const Value *V = MMO->getValue()) { 156 SmallVector<Value *, 4> Objs; 157 if (!getUnderlyingObjectsForCodeGen(V, Objs)) 158 return false; 159 160 for (Value *V : Objs) { 161 assert(isIdentifiedObject(V)); 162 Objects.push_back(UnderlyingObjectsVector::value_type(V, true)); 163 } 164 } else 165 return false; 166 } 167 return true; 168 }; 169 170 if (!allMMOsOkay()) { 171 Objects.clear(); 172 return false; 173 } 174 175 return true; 176 } 177 178 void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) { 179 BB = bb; 180 } 181 182 void ScheduleDAGInstrs::finishBlock() { 183 // Subclasses should no longer refer to the old block. 
184 BB = nullptr; 185 } 186 187 void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb, 188 MachineBasicBlock::iterator begin, 189 MachineBasicBlock::iterator end, 190 unsigned regioninstrs) { 191 assert(bb == BB && "startBlock should set BB"); 192 RegionBegin = begin; 193 RegionEnd = end; 194 NumRegionInstrs = regioninstrs; 195 } 196 197 void ScheduleDAGInstrs::exitRegion() { 198 // Nothing to do. 199 } 200 201 void ScheduleDAGInstrs::addSchedBarrierDeps() { 202 MachineInstr *ExitMI = 203 RegionEnd != BB->end() 204 ? &*skipDebugInstructionsBackward(RegionEnd, RegionBegin) 205 : nullptr; 206 ExitSU.setInstr(ExitMI); 207 // Add dependencies on the defs and uses of the instruction. 208 if (ExitMI) { 209 for (const MachineOperand &MO : ExitMI->operands()) { 210 if (!MO.isReg() || MO.isDef()) continue; 211 Register Reg = MO.getReg(); 212 if (Register::isPhysicalRegister(Reg)) { 213 Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg)); 214 } else if (Register::isVirtualRegister(Reg) && MO.readsReg()) { 215 addVRegUseDeps(&ExitSU, ExitMI->getOperandNo(&MO)); 216 } 217 } 218 } 219 if (!ExitMI || (!ExitMI->isCall() && !ExitMI->isBarrier())) { 220 // For others, e.g. fallthrough, conditional branch, assume the exit 221 // uses all the registers that are livein to the successor blocks. 222 for (const MachineBasicBlock *Succ : BB->successors()) { 223 for (const auto &LI : Succ->liveins()) { 224 if (!Uses.contains(LI.PhysReg)) 225 Uses.insert(PhysRegSUOper(&ExitSU, -1, LI.PhysReg)); 226 } 227 } 228 } 229 } 230 231 /// MO is an operand of SU's instruction that defines a physical register. Adds 232 /// data dependencies from SU to any uses of the physical register. 233 void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) { 234 const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx); 235 assert(MO.isDef() && "expect physreg def"); 236 237 // Ask the target if address-backscheduling is desirable, and if so how much. 238 const TargetSubtargetInfo &ST = MF.getSubtarget(); 239 240 // Only use any non-zero latency for real defs/uses, in contrast to 241 // "fake" operands added by regalloc. 242 const MCInstrDesc *DefMIDesc = &SU->getInstr()->getDesc(); 243 bool ImplicitPseudoDef = (OperIdx >= DefMIDesc->getNumOperands() && 244 !DefMIDesc->hasImplicitDefOfPhysReg(MO.getReg())); 245 for (MCRegAliasIterator Alias(MO.getReg(), TRI, true); 246 Alias.isValid(); ++Alias) { 247 for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) { 248 SUnit *UseSU = I->SU; 249 if (UseSU == SU) 250 continue; 251 252 // Adjust the dependence latency using operand def/use information, 253 // then allow the target to perform its own adjustments. 254 int UseOp = I->OpIdx; 255 MachineInstr *RegUse = nullptr; 256 SDep Dep; 257 if (UseOp < 0) 258 Dep = SDep(SU, SDep::Artificial); 259 else { 260 // Set the hasPhysRegDefs only for physreg defs that have a use within 261 // the scheduling region. 262 SU->hasPhysRegDefs = true; 263 Dep = SDep(SU, SDep::Data, *Alias); 264 RegUse = UseSU->getInstr(); 265 } 266 const MCInstrDesc *UseMIDesc = 267 (RegUse ? 
&UseSU->getInstr()->getDesc() : nullptr);
268 bool ImplicitPseudoUse =
269 (UseMIDesc && UseOp >= ((int)UseMIDesc->getNumOperands()) &&
270 !UseMIDesc->hasImplicitUseOfPhysReg(*Alias));
271 if (!ImplicitPseudoDef && !ImplicitPseudoUse) {
272 Dep.setLatency(SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
273 RegUse, UseOp));
274 ST.adjustSchedDependency(SU, OperIdx, UseSU, UseOp, Dep);
275 } else {
276 Dep.setLatency(0);
277 // FIXME: We could always let the target adjustSchedDependency(), and
278 // remove this condition, but that currently asserts in Hexagon BE.
279 if (SU->getInstr()->isBundle() || (RegUse && RegUse->isBundle()))
280 ST.adjustSchedDependency(SU, OperIdx, UseSU, UseOp, Dep);
281 }
282
283 UseSU->addPred(Dep);
284 }
285 }
286 }
287
288 /// Adds register dependencies (data, anti, and output) from this SUnit
289 /// to following instructions in the same scheduling region that depend on the
290 /// physical register referenced at OperIdx.
291 void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
292 MachineInstr *MI = SU->getInstr();
293 MachineOperand &MO = MI->getOperand(OperIdx);
294 Register Reg = MO.getReg();
295 // We do not need to track any dependencies for constant registers.
296 if (MRI.isConstantPhysReg(Reg))
297 return;
298
299 const TargetSubtargetInfo &ST = MF.getSubtarget();
300
301 // Optionally add output and anti dependencies. For anti
302 // dependencies we use a latency of 0 because for a multi-issue
303 // target we want to allow the defining instruction to issue
304 // in the same cycle as the using instruction.
305 // TODO: Using a latency of 1 here for output dependencies assumes
306 // there's no cost for reusing registers.
307 SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
308 for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
309 if (!Defs.contains(*Alias))
310 continue;
311 for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
312 SUnit *DefSU = I->SU;
313 if (DefSU == &ExitSU)
314 continue;
315 if (DefSU != SU &&
316 (Kind != SDep::Output || !MO.isDead() ||
317 !DefSU->getInstr()->registerDefIsDead(*Alias))) {
318 SDep Dep(SU, Kind, /*Reg=*/*Alias);
319 if (Kind != SDep::Anti)
320 Dep.setLatency(
321 SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
322 ST.adjustSchedDependency(SU, OperIdx, DefSU, I->OpIdx, Dep);
323 DefSU->addPred(Dep);
324 }
325 }
326 }
327
328 if (!MO.isDef()) {
329 SU->hasPhysRegUses = true;
330 // Either insert a new Reg2SUnits entry with an empty SUnits list, or
331 // retrieve the existing SUnits list for this register's uses.
332 // Push this SUnit on the use list.
333 Uses.insert(PhysRegSUOper(SU, OperIdx, Reg));
334 if (RemoveKillFlags)
335 MO.setIsKill(false);
336 } else {
337 addPhysRegDataDeps(SU, OperIdx);
338
339 // Clear previous uses and defs of this register and its subregisters.
340 for (MCSubRegIterator SubReg(Reg, TRI, true); SubReg.isValid(); ++SubReg) {
341 if (Uses.contains(*SubReg))
342 Uses.eraseAll(*SubReg);
343 if (!MO.isDead())
344 Defs.eraseAll(*SubReg);
345 }
346 if (MO.isDead() && SU->isCall) {
347 // Calls will not be reordered because of chain dependencies (see
348 // below). Since call operands are dead, calls may continue to be added
349 // to the DefList making dependence checking quadratic in the size of
350 // the block. Instead, we leave only one call at the back of the
351 // DefList.
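// The backward walk below erases call entries from the end of the DefList
// for Reg until a non-call entry is found; the current call is then pushed,
// so at most one call remains at the back of the list.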
352 Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg); 353 Reg2SUnitsMap::iterator B = P.first; 354 Reg2SUnitsMap::iterator I = P.second; 355 for (bool isBegin = I == B; !isBegin; /* empty */) { 356 isBegin = (--I) == B; 357 if (!I->SU->isCall) 358 break; 359 I = Defs.erase(I); 360 } 361 } 362 363 // Defs are pushed in the order they are visited and never reordered. 364 Defs.insert(PhysRegSUOper(SU, OperIdx, Reg)); 365 } 366 } 367 368 LaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const 369 { 370 Register Reg = MO.getReg(); 371 // No point in tracking lanemasks if we don't have interesting subregisters. 372 const TargetRegisterClass &RC = *MRI.getRegClass(Reg); 373 if (!RC.HasDisjunctSubRegs) 374 return LaneBitmask::getAll(); 375 376 unsigned SubReg = MO.getSubReg(); 377 if (SubReg == 0) 378 return RC.getLaneMask(); 379 return TRI->getSubRegIndexLaneMask(SubReg); 380 } 381 382 bool ScheduleDAGInstrs::deadDefHasNoUse(const MachineOperand &MO) { 383 auto RegUse = CurrentVRegUses.find(MO.getReg()); 384 if (RegUse == CurrentVRegUses.end()) 385 return true; 386 return (RegUse->LaneMask & getLaneMaskForMO(MO)).none(); 387 } 388 389 /// Adds register output and data dependencies from this SUnit to instructions 390 /// that occur later in the same scheduling region if they read from or write to 391 /// the virtual register defined at OperIdx. 392 /// 393 /// TODO: Hoist loop induction variable increments. This has to be 394 /// reevaluated. Generally, IV scheduling should be done before coalescing. 395 void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) { 396 MachineInstr *MI = SU->getInstr(); 397 MachineOperand &MO = MI->getOperand(OperIdx); 398 Register Reg = MO.getReg(); 399 400 LaneBitmask DefLaneMask; 401 LaneBitmask KillLaneMask; 402 if (TrackLaneMasks) { 403 bool IsKill = MO.getSubReg() == 0 || MO.isUndef(); 404 DefLaneMask = getLaneMaskForMO(MO); 405 // If we have a <read-undef> flag, none of the lane values comes from an 406 // earlier instruction. 407 KillLaneMask = IsKill ? LaneBitmask::getAll() : DefLaneMask; 408 409 if (MO.getSubReg() != 0 && MO.isUndef()) { 410 // There may be other subregister defs on the same instruction of the same 411 // register in later operands. The lanes of other defs will now be live 412 // after this instruction, so these should not be treated as killed by the 413 // instruction even though they appear to be killed in this one operand. 414 for (int I = OperIdx + 1, E = MI->getNumOperands(); I != E; ++I) { 415 const MachineOperand &OtherMO = MI->getOperand(I); 416 if (OtherMO.isReg() && OtherMO.isDef() && OtherMO.getReg() == Reg) 417 KillLaneMask &= ~getLaneMaskForMO(OtherMO); 418 } 419 } 420 421 // Clear undef flag, we'll re-add it later once we know which subregister 422 // Def is first. 423 MO.setIsUndef(false); 424 } else { 425 DefLaneMask = LaneBitmask::getAll(); 426 KillLaneMask = LaneBitmask::getAll(); 427 } 428 429 if (MO.isDead()) { 430 assert(deadDefHasNoUse(MO) && "Dead defs should have no uses"); 431 } else { 432 // Add data dependence to all uses we found so far. 433 const TargetSubtargetInfo &ST = MF.getSubtarget(); 434 for (VReg2SUnitOperIdxMultiMap::iterator I = CurrentVRegUses.find(Reg), 435 E = CurrentVRegUses.end(); I != E; /*empty*/) { 436 LaneBitmask LaneMask = I->LaneMask; 437 // Ignore uses of other lanes. 
438 if ((LaneMask & KillLaneMask).none()) { 439 ++I; 440 continue; 441 } 442 443 if ((LaneMask & DefLaneMask).any()) { 444 SUnit *UseSU = I->SU; 445 MachineInstr *Use = UseSU->getInstr(); 446 SDep Dep(SU, SDep::Data, Reg); 447 Dep.setLatency(SchedModel.computeOperandLatency(MI, OperIdx, Use, 448 I->OperandIndex)); 449 ST.adjustSchedDependency(SU, OperIdx, UseSU, I->OperandIndex, Dep); 450 UseSU->addPred(Dep); 451 } 452 453 LaneMask &= ~KillLaneMask; 454 // If we found a Def for all lanes of this use, remove it from the list. 455 if (LaneMask.any()) { 456 I->LaneMask = LaneMask; 457 ++I; 458 } else 459 I = CurrentVRegUses.erase(I); 460 } 461 } 462 463 // Shortcut: Singly defined vregs do not have output/anti dependencies. 464 if (MRI.hasOneDef(Reg)) 465 return; 466 467 // Add output dependence to the next nearest defs of this vreg. 468 // 469 // Unless this definition is dead, the output dependence should be 470 // transitively redundant with antidependencies from this definition's 471 // uses. We're conservative for now until we have a way to guarantee the uses 472 // are not eliminated sometime during scheduling. The output dependence edge 473 // is also useful if output latency exceeds def-use latency. 474 LaneBitmask LaneMask = DefLaneMask; 475 for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg), 476 CurrentVRegDefs.end())) { 477 // Ignore defs for other lanes. 478 if ((V2SU.LaneMask & LaneMask).none()) 479 continue; 480 // Add an output dependence. 481 SUnit *DefSU = V2SU.SU; 482 // Ignore additional defs of the same lanes in one instruction. This can 483 // happen because lanemasks are shared for targets with too many 484 // subregisters. We also use some representration tricks/hacks where we 485 // add super-register defs/uses, to imply that although we only access parts 486 // of the reg we care about the full one. 487 if (DefSU == SU) 488 continue; 489 SDep Dep(SU, SDep::Output, Reg); 490 Dep.setLatency( 491 SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr())); 492 DefSU->addPred(Dep); 493 494 // Update current definition. This can get tricky if the def was about a 495 // bigger lanemask before. We then have to shrink it and create a new 496 // VReg2SUnit for the non-overlapping part. 497 LaneBitmask OverlapMask = V2SU.LaneMask & LaneMask; 498 LaneBitmask NonOverlapMask = V2SU.LaneMask & ~LaneMask; 499 V2SU.SU = SU; 500 V2SU.LaneMask = OverlapMask; 501 if (NonOverlapMask.any()) 502 CurrentVRegDefs.insert(VReg2SUnit(Reg, NonOverlapMask, DefSU)); 503 } 504 // If there was no CurrentVRegDefs entry for some lanes yet, create one. 505 if (LaneMask.any()) 506 CurrentVRegDefs.insert(VReg2SUnit(Reg, LaneMask, SU)); 507 } 508 509 /// Adds a register data dependency if the instruction that defines the 510 /// virtual register used at OperIdx is mapped to an SUnit. Add a register 511 /// antidependency from this SUnit to instructions that occur later in the same 512 /// scheduling region if they write the virtual register. 513 /// 514 /// TODO: Handle ExitSU "uses" properly. 515 void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) { 516 const MachineInstr *MI = SU->getInstr(); 517 assert(!MI->isDebugOrPseudoInstr()); 518 519 const MachineOperand &MO = MI->getOperand(OperIdx); 520 Register Reg = MO.getReg(); 521 522 // Remember the use. Data dependencies will be added when we find the def. 523 LaneBitmask LaneMask = TrackLaneMasks ? 
getLaneMaskForMO(MO) 524 : LaneBitmask::getAll(); 525 CurrentVRegUses.insert(VReg2SUnitOperIdx(Reg, LaneMask, OperIdx, SU)); 526 527 // Add antidependences to the following defs of the vreg. 528 for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg), 529 CurrentVRegDefs.end())) { 530 // Ignore defs for unrelated lanes. 531 LaneBitmask PrevDefLaneMask = V2SU.LaneMask; 532 if ((PrevDefLaneMask & LaneMask).none()) 533 continue; 534 if (V2SU.SU == SU) 535 continue; 536 537 V2SU.SU->addPred(SDep(SU, SDep::Anti, Reg)); 538 } 539 } 540 541 /// Returns true if MI is an instruction we are unable to reason about 542 /// (like a call or something with unmodeled side effects). 543 static inline bool isGlobalMemoryObject(AAResults *AA, MachineInstr *MI) { 544 return MI->isCall() || MI->hasUnmodeledSideEffects() || 545 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad(AA)); 546 } 547 548 void ScheduleDAGInstrs::addChainDependency (SUnit *SUa, SUnit *SUb, 549 unsigned Latency) { 550 if (SUa->getInstr()->mayAlias(AAForDep, *SUb->getInstr(), UseTBAA)) { 551 SDep Dep(SUa, SDep::MayAliasMem); 552 Dep.setLatency(Latency); 553 SUb->addPred(Dep); 554 } 555 } 556 557 /// Creates an SUnit for each real instruction, numbered in top-down 558 /// topological order. The instruction order A < B, implies that no edge exists 559 /// from B to A. 560 /// 561 /// Map each real instruction to its SUnit. 562 /// 563 /// After initSUnits, the SUnits vector cannot be resized and the scheduler may 564 /// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs 565 /// instead of pointers. 566 /// 567 /// MachineScheduler relies on initSUnits numbering the nodes by their order in 568 /// the original instruction list. 569 void ScheduleDAGInstrs::initSUnits() { 570 // We'll be allocating one SUnit for each real instruction in the region, 571 // which is contained within a basic block. 572 SUnits.reserve(NumRegionInstrs); 573 574 for (MachineInstr &MI : make_range(RegionBegin, RegionEnd)) { 575 if (MI.isDebugOrPseudoInstr()) 576 continue; 577 578 SUnit *SU = newSUnit(&MI); 579 MISUnitMap[&MI] = SU; 580 581 SU->isCall = MI.isCall(); 582 SU->isCommutable = MI.isCommutable(); 583 584 // Assign the Latency field of SU using target-provided information. 585 SU->Latency = SchedModel.computeInstrLatency(SU->getInstr()); 586 587 // If this SUnit uses a reserved or unbuffered resource, mark it as such. 588 // 589 // Reserved resources block an instruction from issuing and stall the 590 // entire pipeline. These are identified by BufferSize=0. 591 // 592 // Unbuffered resources prevent execution of subsequent instructions that 593 // require the same resources. This is used for in-order execution pipelines 594 // within an out-of-order core. These are identified by BufferSize=1. 595 if (SchedModel.hasInstrSchedModel()) { 596 const MCSchedClassDesc *SC = getSchedClass(SU); 597 for (const MCWriteProcResEntry &PRE : 598 make_range(SchedModel.getWriteProcResBegin(SC), 599 SchedModel.getWriteProcResEnd(SC))) { 600 switch (SchedModel.getProcResource(PRE.ProcResourceIdx)->BufferSize) { 601 case 0: 602 SU->hasReservedResource = true; 603 break; 604 case 1: 605 SU->isUnbuffered = true; 606 break; 607 default: 608 break; 609 } 610 } 611 } 612 } 613 } 614 615 class ScheduleDAGInstrs::Value2SUsMap : public MapVector<ValueType, SUList> { 616 /// Current total number of SUs in map. 617 unsigned NumNodes = 0; 618 619 /// 1 for loads, 0 for stores. 
(see comment in SUList) 620 unsigned TrueMemOrderLatency; 621 622 public: 623 Value2SUsMap(unsigned lat = 0) : TrueMemOrderLatency(lat) {} 624 625 /// To keep NumNodes up to date, insert() is used instead of 626 /// this operator w/ push_back(). 627 ValueType &operator[](const SUList &Key) { 628 llvm_unreachable("Don't use. Use insert() instead."); }; 629 630 /// Adds SU to the SUList of V. If Map grows huge, reduce its size by calling 631 /// reduce(). 632 void inline insert(SUnit *SU, ValueType V) { 633 MapVector::operator[](V).push_back(SU); 634 NumNodes++; 635 } 636 637 /// Clears the list of SUs mapped to V. 638 void inline clearList(ValueType V) { 639 iterator Itr = find(V); 640 if (Itr != end()) { 641 assert(NumNodes >= Itr->second.size()); 642 NumNodes -= Itr->second.size(); 643 644 Itr->second.clear(); 645 } 646 } 647 648 /// Clears map from all contents. 649 void clear() { 650 MapVector<ValueType, SUList>::clear(); 651 NumNodes = 0; 652 } 653 654 unsigned inline size() const { return NumNodes; } 655 656 /// Counts the number of SUs in this map after a reduction. 657 void reComputeSize() { 658 NumNodes = 0; 659 for (auto &I : *this) 660 NumNodes += I.second.size(); 661 } 662 663 unsigned inline getTrueMemOrderLatency() const { 664 return TrueMemOrderLatency; 665 } 666 667 void dump(); 668 }; 669 670 void ScheduleDAGInstrs::addChainDependencies(SUnit *SU, 671 Value2SUsMap &Val2SUsMap) { 672 for (auto &I : Val2SUsMap) 673 addChainDependencies(SU, I.second, 674 Val2SUsMap.getTrueMemOrderLatency()); 675 } 676 677 void ScheduleDAGInstrs::addChainDependencies(SUnit *SU, 678 Value2SUsMap &Val2SUsMap, 679 ValueType V) { 680 Value2SUsMap::iterator Itr = Val2SUsMap.find(V); 681 if (Itr != Val2SUsMap.end()) 682 addChainDependencies(SU, Itr->second, 683 Val2SUsMap.getTrueMemOrderLatency()); 684 } 685 686 void ScheduleDAGInstrs::addBarrierChain(Value2SUsMap &map) { 687 assert(BarrierChain != nullptr); 688 689 for (auto &I : map) { 690 SUList &sus = I.second; 691 for (auto *SU : sus) 692 SU->addPredBarrier(BarrierChain); 693 } 694 map.clear(); 695 } 696 697 void ScheduleDAGInstrs::insertBarrierChain(Value2SUsMap &map) { 698 assert(BarrierChain != nullptr); 699 700 // Go through all lists of SUs. 701 for (Value2SUsMap::iterator I = map.begin(), EE = map.end(); I != EE;) { 702 Value2SUsMap::iterator CurrItr = I++; 703 SUList &sus = CurrItr->second; 704 SUList::iterator SUItr = sus.begin(), SUEE = sus.end(); 705 for (; SUItr != SUEE; ++SUItr) { 706 // Stop on BarrierChain or any instruction above it. 707 if ((*SUItr)->NodeNum <= BarrierChain->NodeNum) 708 break; 709 710 (*SUItr)->addPredBarrier(BarrierChain); 711 } 712 713 // Remove also the BarrierChain from list if present. 714 if (SUItr != SUEE && *SUItr == BarrierChain) 715 SUItr++; 716 717 // Remove all SUs that are now successors of BarrierChain. 718 if (SUItr != sus.begin()) 719 sus.erase(sus.begin(), SUItr); 720 } 721 722 // Remove all entries with empty su lists. 723 map.remove_if([&](std::pair<ValueType, SUList> &mapEntry) { 724 return (mapEntry.second.empty()); }); 725 726 // Recompute the size of the map (NumNodes). 727 map.reComputeSize(); 728 } 729 730 void ScheduleDAGInstrs::buildSchedGraph(AAResults *AA, 731 RegPressureTracker *RPTracker, 732 PressureDiffs *PDiffs, 733 LiveIntervals *LIS, 734 bool TrackLaneMasks) { 735 const TargetSubtargetInfo &ST = MF.getSubtarget(); 736 bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI 737 : ST.useAA(); 738 AAForDep = UseAA ? 
AA : nullptr;
739
740 BarrierChain = nullptr;
741
742 this->TrackLaneMasks = TrackLaneMasks;
743 MISUnitMap.clear();
744 ScheduleDAG::clearDAG();
745
746 // Create an SUnit for each real instruction.
747 initSUnits();
748
749 if (PDiffs)
750 PDiffs->init(SUnits.size());
751
752 // We build scheduling units by walking a block's instruction list
753 // from bottom to top.
754
755 // Each MI's memory operand(s) is analyzed to a list of underlying
756 // objects. The SU is then inserted in the SUList(s) mapped from the
757 // Value(s). Each Value thus gets mapped to lists of SUs depending
758 // on it, stores and loads kept separately. Two SUs are trivially
759 // non-aliasing if they both depend on only identified Values and do
760 // not share any common Value.
761 Value2SUsMap Stores, Loads(1 /*TrueMemOrderLatency*/);
762
763 // Certain memory accesses are known to not alias any SU in Stores
764 // or Loads, and have therefore their own 'NonAlias'
765 // domain. E.g. spill / reload instructions never alias LLVM IR
766 // Values. It would be nice to assume that this type of memory
767 // access always has proper memory operand modelling, and is
768 // therefore never unanalyzable, but this is conservatively not
769 // done.
770 Value2SUsMap NonAliasStores, NonAliasLoads(1 /*TrueMemOrderLatency*/);
771
772 // Track all instructions that may raise floating-point exceptions.
773 // These do not depend on one another (or normal loads or stores), but
774 // must not be rescheduled across global barriers. Note that we don't
775 // really need a "map" here since we don't track those MIs by value;
776 // using the same Value2SUsMap data type here is simply a matter of
777 // convenience.
778 Value2SUsMap FPExceptions;
779
780 // Remove any stale debug info; sometimes BuildSchedGraph is called again
781 // without emitting the info from the previous call.
782 DbgValues.clear();
783 FirstDbgValue = nullptr;
784
785 assert(Defs.empty() && Uses.empty() &&
786 "Only BuildGraph should update Defs/Uses");
787 Defs.setUniverse(TRI->getNumRegs());
788 Uses.setUniverse(TRI->getNumRegs());
789
790 assert(CurrentVRegDefs.empty() && "nobody else should use CurrentVRegDefs");
791 assert(CurrentVRegUses.empty() && "nobody else should use CurrentVRegUses");
792 unsigned NumVirtRegs = MRI.getNumVirtRegs();
793 CurrentVRegDefs.setUniverse(NumVirtRegs);
794 CurrentVRegUses.setUniverse(NumVirtRegs);
795
796 // Model data dependencies between instructions being scheduled and the
797 // ExitSU.
798 addSchedBarrierDeps();
799
800 // Walk the list of instructions, from bottom moving up.
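// Debug values do not get SUnits of their own: each DBG_VALUE/DBG_INSTR_REF
// is held in DbgMI and paired in DbgValues with the instruction found
// immediately above it, or recorded as FirstDbgValue if it sits at the top
// of the region, so the debug info can be reinserted in the right place
// after scheduling.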
801 MachineInstr *DbgMI = nullptr; 802 for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin; 803 MII != MIE; --MII) { 804 MachineInstr &MI = *std::prev(MII); 805 if (DbgMI) { 806 DbgValues.push_back(std::make_pair(DbgMI, &MI)); 807 DbgMI = nullptr; 808 } 809 810 if (MI.isDebugValue() || MI.isDebugRef()) { 811 DbgMI = &MI; 812 continue; 813 } 814 if (MI.isDebugLabel()) 815 continue; 816 817 if (MI.isPseudoProbe()) 818 continue; 819 820 SUnit *SU = MISUnitMap[&MI]; 821 assert(SU && "No SUnit mapped to this MI"); 822 823 if (RPTracker) { 824 RegisterOperands RegOpers; 825 RegOpers.collect(MI, *TRI, MRI, TrackLaneMasks, false); 826 if (TrackLaneMasks) { 827 SlotIndex SlotIdx = LIS->getInstructionIndex(MI); 828 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx); 829 } 830 if (PDiffs != nullptr) 831 PDiffs->addInstruction(SU->NodeNum, RegOpers, MRI); 832 833 if (RPTracker->getPos() == RegionEnd || &*RPTracker->getPos() != &MI) 834 RPTracker->recedeSkipDebugValues(); 835 assert(&*RPTracker->getPos() == &MI && "RPTracker in sync"); 836 RPTracker->recede(RegOpers); 837 } 838 839 assert( 840 (CanHandleTerminators || (!MI.isTerminator() && !MI.isPosition())) && 841 "Cannot schedule terminators or labels!"); 842 843 // Add register-based dependencies (data, anti, and output). 844 // For some instructions (calls, returns, inline-asm, etc.) there can 845 // be explicit uses and implicit defs, in which case the use will appear 846 // on the operand list before the def. Do two passes over the operand 847 // list to make sure that defs are processed before any uses. 848 bool HasVRegDef = false; 849 for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) { 850 const MachineOperand &MO = MI.getOperand(j); 851 if (!MO.isReg() || !MO.isDef()) 852 continue; 853 Register Reg = MO.getReg(); 854 if (Register::isPhysicalRegister(Reg)) { 855 addPhysRegDeps(SU, j); 856 } else if (Register::isVirtualRegister(Reg)) { 857 HasVRegDef = true; 858 addVRegDefDeps(SU, j); 859 } 860 } 861 // Now process all uses. 862 for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) { 863 const MachineOperand &MO = MI.getOperand(j); 864 // Only look at use operands. 865 // We do not need to check for MO.readsReg() here because subsequent 866 // subregister defs will get output dependence edges and need no 867 // additional use dependencies. 868 if (!MO.isReg() || !MO.isUse()) 869 continue; 870 Register Reg = MO.getReg(); 871 if (Register::isPhysicalRegister(Reg)) { 872 addPhysRegDeps(SU, j); 873 } else if (Register::isVirtualRegister(Reg) && MO.readsReg()) { 874 addVRegUseDeps(SU, j); 875 } 876 } 877 878 // If we haven't seen any uses in this scheduling region, create a 879 // dependence edge to ExitSU to model the live-out latency. This is required 880 // for vreg defs with no in-region use, and prefetches with no vreg def. 881 // 882 // FIXME: NumDataSuccs would be more precise than NumSuccs here. This 883 // check currently relies on being called before adding chain deps. 884 if (SU->NumSuccs == 0 && SU->Latency > 1 && (HasVRegDef || MI.mayLoad())) { 885 SDep Dep(SU, SDep::Artificial); 886 Dep.setLatency(SU->Latency - 1); 887 ExitSU.addPred(Dep); 888 } 889 890 // Add memory dependencies (Note: isStoreToStackSlot and 891 // isLoadFromStackSLot are not usable after stack slots are lowered to 892 // actual addresses). 893 894 // This is a barrier event that acts as a pivotal node in the DAG. 895 if (isGlobalMemoryObject(AA, &MI)) { 896 897 // Become the barrier chain. 
898 if (BarrierChain) 899 BarrierChain->addPredBarrier(SU); 900 BarrierChain = SU; 901 902 LLVM_DEBUG(dbgs() << "Global memory object and new barrier chain: SU(" 903 << BarrierChain->NodeNum << ").\n";); 904 905 // Add dependencies against everything below it and clear maps. 906 addBarrierChain(Stores); 907 addBarrierChain(Loads); 908 addBarrierChain(NonAliasStores); 909 addBarrierChain(NonAliasLoads); 910 addBarrierChain(FPExceptions); 911 912 continue; 913 } 914 915 // Instructions that may raise FP exceptions may not be moved 916 // across any global barriers. 917 if (MI.mayRaiseFPException()) { 918 if (BarrierChain) 919 BarrierChain->addPredBarrier(SU); 920 921 FPExceptions.insert(SU, UnknownValue); 922 923 if (FPExceptions.size() >= HugeRegion) { 924 LLVM_DEBUG(dbgs() << "Reducing FPExceptions map.\n";); 925 Value2SUsMap empty; 926 reduceHugeMemNodeMaps(FPExceptions, empty, getReductionSize()); 927 } 928 } 929 930 // If it's not a store or a variant load, we're done. 931 if (!MI.mayStore() && 932 !(MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))) 933 continue; 934 935 // Always add dependecy edge to BarrierChain if present. 936 if (BarrierChain) 937 BarrierChain->addPredBarrier(SU); 938 939 // Find the underlying objects for MI. The Objs vector is either 940 // empty, or filled with the Values of memory locations which this 941 // SU depends on. 942 UnderlyingObjectsVector Objs; 943 bool ObjsFound = getUnderlyingObjectsForInstr(&MI, MFI, Objs, 944 MF.getDataLayout()); 945 946 if (MI.mayStore()) { 947 if (!ObjsFound) { 948 // An unknown store depends on all stores and loads. 949 addChainDependencies(SU, Stores); 950 addChainDependencies(SU, NonAliasStores); 951 addChainDependencies(SU, Loads); 952 addChainDependencies(SU, NonAliasLoads); 953 954 // Map this store to 'UnknownValue'. 955 Stores.insert(SU, UnknownValue); 956 } else { 957 // Add precise dependencies against all previously seen memory 958 // accesses mapped to the same Value(s). 959 for (const UnderlyingObject &UnderlObj : Objs) { 960 ValueType V = UnderlObj.getValue(); 961 bool ThisMayAlias = UnderlObj.mayAlias(); 962 963 // Add dependencies to previous stores and loads mapped to V. 964 addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V); 965 addChainDependencies(SU, (ThisMayAlias ? Loads : NonAliasLoads), V); 966 } 967 // Update the store map after all chains have been added to avoid adding 968 // self-loop edge if multiple underlying objects are present. 969 for (const UnderlyingObject &UnderlObj : Objs) { 970 ValueType V = UnderlObj.getValue(); 971 bool ThisMayAlias = UnderlObj.mayAlias(); 972 973 // Map this store to V. 974 (ThisMayAlias ? Stores : NonAliasStores).insert(SU, V); 975 } 976 // The store may have dependencies to unanalyzable loads and 977 // stores. 978 addChainDependencies(SU, Loads, UnknownValue); 979 addChainDependencies(SU, Stores, UnknownValue); 980 } 981 } else { // SU is a load. 982 if (!ObjsFound) { 983 // An unknown load depends on all stores. 984 addChainDependencies(SU, Stores); 985 addChainDependencies(SU, NonAliasStores); 986 987 Loads.insert(SU, UnknownValue); 988 } else { 989 for (const UnderlyingObject &UnderlObj : Objs) { 990 ValueType V = UnderlObj.getValue(); 991 bool ThisMayAlias = UnderlObj.mayAlias(); 992 993 // Add precise dependencies against all previously seen stores 994 // mapping to the same Value(s). 995 addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V); 996 997 // Map this load to V. 998 (ThisMayAlias ? 
Loads : NonAliasLoads).insert(SU, V); 999 } 1000 // The load may have dependencies to unanalyzable stores. 1001 addChainDependencies(SU, Stores, UnknownValue); 1002 } 1003 } 1004 1005 // Reduce maps if they grow huge. 1006 if (Stores.size() + Loads.size() >= HugeRegion) { 1007 LLVM_DEBUG(dbgs() << "Reducing Stores and Loads maps.\n";); 1008 reduceHugeMemNodeMaps(Stores, Loads, getReductionSize()); 1009 } 1010 if (NonAliasStores.size() + NonAliasLoads.size() >= HugeRegion) { 1011 LLVM_DEBUG( 1012 dbgs() << "Reducing NonAliasStores and NonAliasLoads maps.\n";); 1013 reduceHugeMemNodeMaps(NonAliasStores, NonAliasLoads, getReductionSize()); 1014 } 1015 } 1016 1017 if (DbgMI) 1018 FirstDbgValue = DbgMI; 1019 1020 Defs.clear(); 1021 Uses.clear(); 1022 CurrentVRegDefs.clear(); 1023 CurrentVRegUses.clear(); 1024 1025 Topo.MarkDirty(); 1026 } 1027 1028 raw_ostream &llvm::operator<<(raw_ostream &OS, const PseudoSourceValue* PSV) { 1029 PSV->printCustom(OS); 1030 return OS; 1031 } 1032 1033 void ScheduleDAGInstrs::Value2SUsMap::dump() { 1034 for (auto &Itr : *this) { 1035 if (Itr.first.is<const Value*>()) { 1036 const Value *V = Itr.first.get<const Value*>(); 1037 if (isa<UndefValue>(V)) 1038 dbgs() << "Unknown"; 1039 else 1040 V->printAsOperand(dbgs()); 1041 } 1042 else if (Itr.first.is<const PseudoSourceValue*>()) 1043 dbgs() << Itr.first.get<const PseudoSourceValue*>(); 1044 else 1045 llvm_unreachable("Unknown Value type."); 1046 1047 dbgs() << " : "; 1048 dumpSUList(Itr.second); 1049 } 1050 } 1051 1052 void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores, 1053 Value2SUsMap &loads, unsigned N) { 1054 LLVM_DEBUG(dbgs() << "Before reduction:\nStoring SUnits:\n"; stores.dump(); 1055 dbgs() << "Loading SUnits:\n"; loads.dump()); 1056 1057 // Insert all SU's NodeNums into a vector and sort it. 1058 std::vector<unsigned> NodeNums; 1059 NodeNums.reserve(stores.size() + loads.size()); 1060 for (auto &I : stores) 1061 for (auto *SU : I.second) 1062 NodeNums.push_back(SU->NodeNum); 1063 for (auto &I : loads) 1064 for (auto *SU : I.second) 1065 NodeNums.push_back(SU->NodeNum); 1066 llvm::sort(NodeNums); 1067 1068 // The N last elements in NodeNums will be removed, and the SU with 1069 // the lowest NodeNum of them will become the new BarrierChain to 1070 // let the not yet seen SUs have a dependency to the removed SUs. 1071 assert(N <= NodeNums.size()); 1072 SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)]; 1073 if (BarrierChain) { 1074 // The aliasing and non-aliasing maps reduce independently of each 1075 // other, but share a common BarrierChain. Check if the 1076 // newBarrierChain is above the former one. If it is not, it may 1077 // introduce a loop to use newBarrierChain, so keep the old one. 
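// SUnit NodeNums follow the original top-down instruction order (see
// initSUnits), so a smaller NodeNum means the node lies earlier (above) in
// the region; the comparison below uses that to test whether newBarrierChain
// is above the current BarrierChain.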
1078 if (newBarrierChain->NodeNum < BarrierChain->NodeNum) {
1079 BarrierChain->addPredBarrier(newBarrierChain);
1080 BarrierChain = newBarrierChain;
1081 LLVM_DEBUG(dbgs() << "Inserting new barrier chain: SU("
1082 << BarrierChain->NodeNum << ").\n";);
1083 }
1084 else
1085 LLVM_DEBUG(dbgs() << "Keeping old barrier chain: SU("
1086 << BarrierChain->NodeNum << ").\n";);
1087 }
1088 else
1089 BarrierChain = newBarrierChain;
1090
1091 insertBarrierChain(stores);
1092 insertBarrierChain(loads);
1093
1094 LLVM_DEBUG(dbgs() << "After reduction:\nStoring SUnits:\n"; stores.dump();
1095 dbgs() << "Loading SUnits:\n"; loads.dump());
1096 }
1097
1098 static void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs,
1099 MachineInstr &MI, bool addToLiveRegs) {
1100 for (MachineOperand &MO : MI.operands()) {
1101 if (!MO.isReg() || !MO.readsReg())
1102 continue;
1103 Register Reg = MO.getReg();
1104 if (!Reg)
1105 continue;
1106
1107 // Things that are available after the instruction are killed by it.
1108 bool IsKill = LiveRegs.available(MRI, Reg);
1109 MO.setIsKill(IsKill);
1110 if (addToLiveRegs)
1111 LiveRegs.addReg(Reg);
1112 }
1113 }
1114
1115 void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) {
1116 LLVM_DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n');
1117
1118 LiveRegs.init(*TRI);
1119 LiveRegs.addLiveOuts(MBB);
1120
1121 // Examine block from end to start...
1122 for (MachineInstr &MI : make_range(MBB.rbegin(), MBB.rend())) {
1123 if (MI.isDebugOrPseudoInstr())
1124 continue;
1125
1126 // Update liveness. Registers that are defed but not used in this
1127 // instruction are now dead. Mark the register and all subregs as they
1128 // are completely defined.
1129 for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
1130 const MachineOperand &MO = *O;
1131 if (MO.isReg()) {
1132 if (!MO.isDef())
1133 continue;
1134 Register Reg = MO.getReg();
1135 if (!Reg)
1136 continue;
1137 LiveRegs.removeReg(Reg);
1138 } else if (MO.isRegMask()) {
1139 LiveRegs.removeRegsInMask(MO);
1140 }
1141 }
1142
1143 // If there is a bundle header, fix it up first.
1144 if (!MI.isBundled()) {
1145 toggleKills(MRI, LiveRegs, MI, true);
1146 } else {
1147 MachineBasicBlock::instr_iterator Bundle = MI.getIterator();
1148 if (MI.isBundle())
1149 toggleKills(MRI, LiveRegs, MI, false);
1150
1151 // Some targets make the (questionable) assumption that the instructions
1152 // inside the bundle are ordered and consequently only the last use of
1153 // a register inside the bundle can kill it.
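// Advance to the last instruction of the bundle, then visit the bundled
// instructions in reverse so that only the final use of a register within
// the bundle keeps its kill flag.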
1154 MachineBasicBlock::instr_iterator I = std::next(Bundle); 1155 while (I->isBundledWithSucc()) 1156 ++I; 1157 do { 1158 if (!I->isDebugOrPseudoInstr()) 1159 toggleKills(MRI, LiveRegs, *I, true); 1160 --I; 1161 } while (I != Bundle); 1162 } 1163 } 1164 } 1165 1166 void ScheduleDAGInstrs::dumpNode(const SUnit &SU) const { 1167 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1168 dumpNodeName(SU); 1169 dbgs() << ": "; 1170 SU.getInstr()->dump(); 1171 #endif 1172 } 1173 1174 void ScheduleDAGInstrs::dump() const { 1175 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1176 if (EntrySU.getInstr() != nullptr) 1177 dumpNodeAll(EntrySU); 1178 for (const SUnit &SU : SUnits) 1179 dumpNodeAll(SU); 1180 if (ExitSU.getInstr() != nullptr) 1181 dumpNodeAll(ExitSU); 1182 #endif 1183 } 1184 1185 std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const { 1186 std::string s; 1187 raw_string_ostream oss(s); 1188 if (SU == &EntrySU) 1189 oss << "<entry>"; 1190 else if (SU == &ExitSU) 1191 oss << "<exit>"; 1192 else 1193 SU->getInstr()->print(oss, /*IsStandalone=*/true); 1194 return oss.str(); 1195 } 1196 1197 /// Return the basic block label. It is not necessarilly unique because a block 1198 /// contains multiple scheduling regions. But it is fine for visualization. 1199 std::string ScheduleDAGInstrs::getDAGName() const { 1200 return "dag." + BB->getFullName(); 1201 } 1202 1203 bool ScheduleDAGInstrs::canAddEdge(SUnit *SuccSU, SUnit *PredSU) { 1204 return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU); 1205 } 1206 1207 bool ScheduleDAGInstrs::addEdge(SUnit *SuccSU, const SDep &PredDep) { 1208 if (SuccSU != &ExitSU) { 1209 // Do not use WillCreateCycle, it assumes SD scheduling. 1210 // If Pred is reachable from Succ, then the edge creates a cycle. 1211 if (Topo.IsReachable(PredDep.getSUnit(), SuccSU)) 1212 return false; 1213 Topo.AddPredQueued(SuccSU, PredDep.getSUnit()); 1214 } 1215 SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial()); 1216 // Return true regardless of whether a new edge needed to be inserted. 1217 return true; 1218 } 1219 1220 //===----------------------------------------------------------------------===// 1221 // SchedDFSResult Implementation 1222 //===----------------------------------------------------------------------===// 1223 1224 namespace llvm { 1225 1226 /// Internal state used to compute SchedDFSResult. 1227 class SchedDFSImpl { 1228 SchedDFSResult &R; 1229 1230 /// Join DAG nodes into equivalence classes by their subtree. 1231 IntEqClasses SubtreeClasses; 1232 /// List PredSU, SuccSU pairs that represent data edges between subtrees. 1233 std::vector<std::pair<const SUnit *, const SUnit*>> ConnectionPairs; 1234 1235 struct RootData { 1236 unsigned NodeID; 1237 unsigned ParentNodeID; ///< Parent node (member of the parent subtree). 1238 unsigned SubInstrCount = 0; ///< Instr count in this tree only, not 1239 /// children. 1240 1241 RootData(unsigned id): NodeID(id), 1242 ParentNodeID(SchedDFSResult::InvalidSubtreeID) {} 1243 1244 unsigned getSparseSetIndex() const { return NodeID; } 1245 }; 1246 1247 SparseSet<RootData> RootSet; 1248 1249 public: 1250 SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) { 1251 RootSet.setUniverse(R.DFSNodeData.size()); 1252 } 1253 1254 /// Returns true if this node been visited by the DFS traversal. 1255 /// 1256 /// During visitPostorderNode the Node's SubtreeID is assigned to the Node 1257 /// ID. Later, SubtreeID is updated but remains valid. 
1258 bool isVisited(const SUnit *SU) const { 1259 return R.DFSNodeData[SU->NodeNum].SubtreeID 1260 != SchedDFSResult::InvalidSubtreeID; 1261 } 1262 1263 /// Initializes this node's instruction count. We don't need to flag the node 1264 /// visited until visitPostorder because the DAG cannot have cycles. 1265 void visitPreorder(const SUnit *SU) { 1266 R.DFSNodeData[SU->NodeNum].InstrCount = 1267 SU->getInstr()->isTransient() ? 0 : 1; 1268 } 1269 1270 /// Called once for each node after all predecessors are visited. Revisit this 1271 /// node's predecessors and potentially join them now that we know the ILP of 1272 /// the other predecessors. 1273 void visitPostorderNode(const SUnit *SU) { 1274 // Mark this node as the root of a subtree. It may be joined with its 1275 // successors later. 1276 R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum; 1277 RootData RData(SU->NodeNum); 1278 RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1; 1279 1280 // If any predecessors are still in their own subtree, they either cannot be 1281 // joined or are large enough to remain separate. If this parent node's 1282 // total instruction count is not greater than a child subtree by at least 1283 // the subtree limit, then try to join it now since splitting subtrees is 1284 // only useful if multiple high-pressure paths are possible. 1285 unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount; 1286 for (const SDep &PredDep : SU->Preds) { 1287 if (PredDep.getKind() != SDep::Data) 1288 continue; 1289 unsigned PredNum = PredDep.getSUnit()->NodeNum; 1290 if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit) 1291 joinPredSubtree(PredDep, SU, /*CheckLimit=*/false); 1292 1293 // Either link or merge the TreeData entry from the child to the parent. 1294 if (R.DFSNodeData[PredNum].SubtreeID == PredNum) { 1295 // If the predecessor's parent is invalid, this is a tree edge and the 1296 // current node is the parent. 1297 if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID) 1298 RootSet[PredNum].ParentNodeID = SU->NodeNum; 1299 } 1300 else if (RootSet.count(PredNum)) { 1301 // The predecessor is not a root, but is still in the root set. This 1302 // must be the new parent that it was just joined to. Note that 1303 // RootSet[PredNum].ParentNodeID may either be invalid or may still be 1304 // set to the original parent. 1305 RData.SubInstrCount += RootSet[PredNum].SubInstrCount; 1306 RootSet.erase(PredNum); 1307 } 1308 } 1309 RootSet[SU->NodeNum] = RData; 1310 } 1311 1312 /// Called once for each tree edge after calling visitPostOrderNode on 1313 /// the predecessor. Increment the parent node's instruction count and 1314 /// preemptively join this subtree to its parent's if it is small enough. 1315 void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) { 1316 R.DFSNodeData[Succ->NodeNum].InstrCount 1317 += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount; 1318 joinPredSubtree(PredDep, Succ); 1319 } 1320 1321 /// Adds a connection for cross edges. 1322 void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) { 1323 ConnectionPairs.push_back(std::make_pair(PredDep.getSUnit(), Succ)); 1324 } 1325 1326 /// Sets each node's subtree ID to the representative ID and record 1327 /// connections between trees. 
1328 void finalize() { 1329 SubtreeClasses.compress(); 1330 R.DFSTreeData.resize(SubtreeClasses.getNumClasses()); 1331 assert(SubtreeClasses.getNumClasses() == RootSet.size() 1332 && "number of roots should match trees"); 1333 for (const RootData &Root : RootSet) { 1334 unsigned TreeID = SubtreeClasses[Root.NodeID]; 1335 if (Root.ParentNodeID != SchedDFSResult::InvalidSubtreeID) 1336 R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[Root.ParentNodeID]; 1337 R.DFSTreeData[TreeID].SubInstrCount = Root.SubInstrCount; 1338 // Note that SubInstrCount may be greater than InstrCount if we joined 1339 // subtrees across a cross edge. InstrCount will be attributed to the 1340 // original parent, while SubInstrCount will be attributed to the joined 1341 // parent. 1342 } 1343 R.SubtreeConnections.resize(SubtreeClasses.getNumClasses()); 1344 R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses()); 1345 LLVM_DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n"); 1346 for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) { 1347 R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx]; 1348 LLVM_DEBUG(dbgs() << " SU(" << Idx << ") in tree " 1349 << R.DFSNodeData[Idx].SubtreeID << '\n'); 1350 } 1351 for (const std::pair<const SUnit*, const SUnit*> &P : ConnectionPairs) { 1352 unsigned PredTree = SubtreeClasses[P.first->NodeNum]; 1353 unsigned SuccTree = SubtreeClasses[P.second->NodeNum]; 1354 if (PredTree == SuccTree) 1355 continue; 1356 unsigned Depth = P.first->getDepth(); 1357 addConnection(PredTree, SuccTree, Depth); 1358 addConnection(SuccTree, PredTree, Depth); 1359 } 1360 } 1361 1362 protected: 1363 /// Joins the predecessor subtree with the successor that is its DFS parent. 1364 /// Applies some heuristics before joining. 1365 bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ, 1366 bool CheckLimit = true) { 1367 assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges"); 1368 1369 // Check if the predecessor is already joined. 1370 const SUnit *PredSU = PredDep.getSUnit(); 1371 unsigned PredNum = PredSU->NodeNum; 1372 if (R.DFSNodeData[PredNum].SubtreeID != PredNum) 1373 return false; 1374 1375 // Four is the magic number of successors before a node is considered a 1376 // pinch point. 1377 unsigned NumDataSucs = 0; 1378 for (const SDep &SuccDep : PredSU->Succs) { 1379 if (SuccDep.getKind() == SDep::Data) { 1380 if (++NumDataSucs >= 4) 1381 return false; 1382 } 1383 } 1384 if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit) 1385 return false; 1386 R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum; 1387 SubtreeClasses.join(Succ->NodeNum, PredNum); 1388 return true; 1389 } 1390 1391 /// Called by finalize() to record a connection between trees. 1392 void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) { 1393 if (!Depth) 1394 return; 1395 1396 do { 1397 SmallVectorImpl<SchedDFSResult::Connection> &Connections = 1398 R.SubtreeConnections[FromTree]; 1399 for (SchedDFSResult::Connection &C : Connections) { 1400 if (C.TreeID == ToTree) { 1401 C.Level = std::max(C.Level, Depth); 1402 return; 1403 } 1404 } 1405 Connections.push_back(SchedDFSResult::Connection(ToTree, Depth)); 1406 FromTree = R.DFSTreeData[FromTree].ParentTreeID; 1407 } while (FromTree != SchedDFSResult::InvalidSubtreeID); 1408 } 1409 }; 1410 1411 } // end namespace llvm 1412 1413 namespace { 1414 1415 /// Manage the stack used by a reverse depth-first search over the DAG. 
1416 class SchedDAGReverseDFS { 1417 std::vector<std::pair<const SUnit *, SUnit::const_pred_iterator>> DFSStack; 1418 1419 public: 1420 bool isComplete() const { return DFSStack.empty(); } 1421 1422 void follow(const SUnit *SU) { 1423 DFSStack.push_back(std::make_pair(SU, SU->Preds.begin())); 1424 } 1425 void advance() { ++DFSStack.back().second; } 1426 1427 const SDep *backtrack() { 1428 DFSStack.pop_back(); 1429 return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second); 1430 } 1431 1432 const SUnit *getCurr() const { return DFSStack.back().first; } 1433 1434 SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; } 1435 1436 SUnit::const_pred_iterator getPredEnd() const { 1437 return getCurr()->Preds.end(); 1438 } 1439 }; 1440 1441 } // end anonymous namespace 1442 1443 static bool hasDataSucc(const SUnit *SU) { 1444 for (const SDep &SuccDep : SU->Succs) { 1445 if (SuccDep.getKind() == SDep::Data && 1446 !SuccDep.getSUnit()->isBoundaryNode()) 1447 return true; 1448 } 1449 return false; 1450 } 1451 1452 /// Computes an ILP metric for all nodes in the subDAG reachable via depth-first 1453 /// search from this root. 1454 void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) { 1455 if (!IsBottomUp) 1456 llvm_unreachable("Top-down ILP metric is unimplemented"); 1457 1458 SchedDFSImpl Impl(*this); 1459 for (const SUnit &SU : SUnits) { 1460 if (Impl.isVisited(&SU) || hasDataSucc(&SU)) 1461 continue; 1462 1463 SchedDAGReverseDFS DFS; 1464 Impl.visitPreorder(&SU); 1465 DFS.follow(&SU); 1466 while (true) { 1467 // Traverse the leftmost path as far as possible. 1468 while (DFS.getPred() != DFS.getPredEnd()) { 1469 const SDep &PredDep = *DFS.getPred(); 1470 DFS.advance(); 1471 // Ignore non-data edges. 1472 if (PredDep.getKind() != SDep::Data 1473 || PredDep.getSUnit()->isBoundaryNode()) { 1474 continue; 1475 } 1476 // An already visited edge is a cross edge, assuming an acyclic DAG. 1477 if (Impl.isVisited(PredDep.getSUnit())) { 1478 Impl.visitCrossEdge(PredDep, DFS.getCurr()); 1479 continue; 1480 } 1481 Impl.visitPreorder(PredDep.getSUnit()); 1482 DFS.follow(PredDep.getSUnit()); 1483 } 1484 // Visit the top of the stack in postorder and backtrack. 1485 const SUnit *Child = DFS.getCurr(); 1486 const SDep *PredDep = DFS.backtrack(); 1487 Impl.visitPostorderNode(Child); 1488 if (PredDep) 1489 Impl.visitPostorderEdge(*PredDep, DFS.getCurr()); 1490 if (DFS.isComplete()) 1491 break; 1492 } 1493 } 1494 Impl.finalize(); 1495 } 1496 1497 /// The root of the given SubtreeID was just scheduled. For all subtrees 1498 /// connected to this tree, record the depth of the connection so that the 1499 /// nearest connected subtrees can be prioritized. 
1500 void SchedDFSResult::scheduleTree(unsigned SubtreeID) { 1501 for (const Connection &C : SubtreeConnections[SubtreeID]) { 1502 SubtreeConnectLevels[C.TreeID] = 1503 std::max(SubtreeConnectLevels[C.TreeID], C.Level); 1504 LLVM_DEBUG(dbgs() << " Tree: " << C.TreeID << " @" 1505 << SubtreeConnectLevels[C.TreeID] << '\n'); 1506 } 1507 } 1508 1509 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1510 LLVM_DUMP_METHOD void ILPValue::print(raw_ostream &OS) const { 1511 OS << InstrCount << " / " << Length << " = "; 1512 if (!Length) 1513 OS << "BADILP"; 1514 else 1515 OS << format("%g", ((double)InstrCount / Length)); 1516 } 1517 1518 LLVM_DUMP_METHOD void ILPValue::dump() const { 1519 dbgs() << *this << '\n'; 1520 } 1521 1522 namespace llvm { 1523 1524 LLVM_DUMP_METHOD 1525 raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) { 1526 Val.print(OS); 1527 return OS; 1528 } 1529 1530 } // end namespace llvm 1531 1532 #endif 1533