1 //===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 /// \file This implements the ScheduleDAGInstrs class, which implements 11 /// re-scheduling of MachineInstrs. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/CodeGen/ScheduleDAGInstrs.h" 16 #include "llvm/ADT/IntEqClasses.h" 17 #include "llvm/ADT/SmallPtrSet.h" 18 #include "llvm/ADT/SmallSet.h" 19 #include "llvm/Analysis/AliasAnalysis.h" 20 #include "llvm/Analysis/ValueTracking.h" 21 #include "llvm/CodeGen/LiveIntervalAnalysis.h" 22 #include "llvm/CodeGen/MachineFunctionPass.h" 23 #include "llvm/CodeGen/MachineFrameInfo.h" 24 #include "llvm/CodeGen/MachineInstrBuilder.h" 25 #include "llvm/CodeGen/MachineMemOperand.h" 26 #include "llvm/CodeGen/MachineRegisterInfo.h" 27 #include "llvm/CodeGen/PseudoSourceValue.h" 28 #include "llvm/CodeGen/RegisterPressure.h" 29 #include "llvm/CodeGen/ScheduleDFS.h" 30 #include "llvm/IR/Function.h" 31 #include "llvm/IR/Type.h" 32 #include "llvm/IR/Operator.h" 33 #include "llvm/Support/CommandLine.h" 34 #include "llvm/Support/Debug.h" 35 #include "llvm/Support/Format.h" 36 #include "llvm/Support/raw_ostream.h" 37 #include "llvm/Target/TargetInstrInfo.h" 38 #include "llvm/Target/TargetMachine.h" 39 #include "llvm/Target/TargetRegisterInfo.h" 40 #include "llvm/Target/TargetSubtargetInfo.h" 41 42 using namespace llvm; 43 44 #define DEBUG_TYPE "misched" 45 46 static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden, 47 cl::ZeroOrMore, cl::init(false), 48 cl::desc("Enable use of AA during MI DAG construction")); 49 50 static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, 51 cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction")); 52 53 // Note: the two options below might be used in tuning compile time vs 54 // output quality. Setting HugeRegion so large that it will never be 55 // reached means best-effort, but may be slow. 56 57 // When Stores and Loads maps (or NonAliasStores and NonAliasLoads) 58 // together hold this many SUs, a reduction of maps will be done. 59 static cl::opt<unsigned> HugeRegion("dag-maps-huge-region", cl::Hidden, 60 cl::init(1000), cl::desc("The limit to use while constructing the DAG " 61 "prior to scheduling, at which point a trade-off " 62 "is made to avoid excessive compile time.")); 63 64 static cl::opt<unsigned> ReductionSize( 65 "dag-maps-reduction-size", cl::Hidden, 66 cl::desc("A huge scheduling region will have maps reduced by this many " 67 "nodes at a time. Defaults to HugeRegion / 2.")); 68 69 static unsigned getReductionSize() { 70 // Always reduce a huge region with half of the elements, except 71 // when user sets this number explicitly. 
72 if (ReductionSize.getNumOccurrences() == 0) 73 return HugeRegion / 2; 74 return ReductionSize; 75 } 76 77 static void dumpSUList(ScheduleDAGInstrs::SUList &L) { 78 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 79 dbgs() << "{ "; 80 for (const SUnit *su : L) { 81 dbgs() << "SU(" << su->NodeNum << ")"; 82 if (su != L.back()) 83 dbgs() << ", "; 84 } 85 dbgs() << "}\n"; 86 #endif 87 } 88 89 ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf, 90 const MachineLoopInfo *mli, 91 bool RemoveKillFlags) 92 : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()), 93 RemoveKillFlags(RemoveKillFlags), CanHandleTerminators(false), 94 TrackLaneMasks(false), AAForDep(nullptr), BarrierChain(nullptr), 95 UnknownValue(UndefValue::get( 96 Type::getVoidTy(mf.getFunction()->getContext()))), 97 FirstDbgValue(nullptr) { 98 DbgValues.clear(); 99 100 const TargetSubtargetInfo &ST = mf.getSubtarget(); 101 SchedModel.init(ST.getSchedModel(), &ST, TII); 102 } 103 104 /// This is the function that does the work of looking through basic 105 /// ptrtoint+arithmetic+inttoptr sequences. 106 static const Value *getUnderlyingObjectFromInt(const Value *V) { 107 do { 108 if (const Operator *U = dyn_cast<Operator>(V)) { 109 // If we find a ptrtoint, we can transfer control back to the 110 // regular getUnderlyingObjectFromInt. 111 if (U->getOpcode() == Instruction::PtrToInt) 112 return U->getOperand(0); 113 // If we find an add of a constant, a multiplied value, or a phi, it's 114 // likely that the other operand will lead us to the base 115 // object. We don't have to worry about the case where the 116 // object address is somehow being computed by the multiply, 117 // because our callers only care when the result is an 118 // identifiable object. 119 if (U->getOpcode() != Instruction::Add || 120 (!isa<ConstantInt>(U->getOperand(1)) && 121 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul && 122 !isa<PHINode>(U->getOperand(1)))) 123 return V; 124 V = U->getOperand(0); 125 } else { 126 return V; 127 } 128 assert(V->getType()->isIntegerTy() && "Unexpected operand type!"); 129 } while (1); 130 } 131 132 /// This is a wrapper around GetUnderlyingObjects and adds support for basic 133 /// ptrtoint+arithmetic+inttoptr sequences. 134 static void getUnderlyingObjects(const Value *V, 135 SmallVectorImpl<Value *> &Objects, 136 const DataLayout &DL) { 137 SmallPtrSet<const Value *, 16> Visited; 138 SmallVector<const Value *, 4> Working(1, V); 139 do { 140 V = Working.pop_back_val(); 141 142 SmallVector<Value *, 4> Objs; 143 GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL); 144 145 for (Value *V : Objs) { 146 if (!Visited.insert(V).second) 147 continue; 148 if (Operator::getOpcode(V) == Instruction::IntToPtr) { 149 const Value *O = 150 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0)); 151 if (O->getType()->isPointerTy()) { 152 Working.push_back(O); 153 continue; 154 } 155 } 156 Objects.push_back(const_cast<Value *>(V)); 157 } 158 } while (!Working.empty()); 159 } 160 161 /// If this machine instr has memory reference information and it can be tracked 162 /// to a normal reference to a known object, return the Value for that object. 
static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
                                         const MachineFrameInfo &MFI,
                                         UnderlyingObjectsVector &Objects,
                                         const DataLayout &DL) {
  auto allMMOsOkay = [&]() {
    for (const MachineMemOperand *MMO : MI->memoperands()) {
      if (MMO->isVolatile())
        return false;

      if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
        // Functions that contain tail calls don't have unique PseudoSourceValue
        // objects. Two PseudoSourceValues might refer to the same or
        // overlapping locations. The client code calling this function assumes
        // this is not the case. So return a conservative answer of no known
        // object.
        if (MFI.hasTailCall())
          return false;

        // For now, ignore PseudoSourceValues which may alias LLVM IR values
        // because the code that uses this function has no way to cope with
        // such aliases.
        if (PSV->isAliased(&MFI))
          return false;

        bool MayAlias = PSV->mayAlias(&MFI);
        Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias));
      } else if (const Value *V = MMO->getValue()) {
        SmallVector<Value *, 4> Objs;
        getUnderlyingObjects(V, Objs, DL);

        for (Value *V : Objs) {
          if (!isIdentifiedObject(V))
            return false;

          Objects.push_back(UnderlyingObjectsVector::value_type(V, true));
        }
      } else
        return false;
    }
    return true;
  };

  if (!allMMOsOkay())
    Objects.clear();
}

void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
  BB = bb;
}

void ScheduleDAGInstrs::finishBlock() {
  // Subclasses should no longer refer to the old block.
  BB = nullptr;
}

void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs) {
  assert(bb == BB && "startBlock should set BB");
  RegionBegin = begin;
  RegionEnd = end;
  NumRegionInstrs = regioninstrs;
}

void ScheduleDAGInstrs::exitRegion() {
  // Nothing to do.
}

void ScheduleDAGInstrs::addSchedBarrierDeps() {
  MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : nullptr;
  ExitSU.setInstr(ExitMI);
  // Add dependencies on the defs and uses of the instruction.
  if (ExitMI) {
    for (const MachineOperand &MO : ExitMI->operands()) {
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
        Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      } else if (TargetRegisterInfo::isVirtualRegister(Reg) && MO.readsReg()) {
        addVRegUseDeps(&ExitSU, ExitMI->getOperandNo(&MO));
      }
    }
  }
  if (!ExitMI || (!ExitMI->isCall() && !ExitMI->isBarrier())) {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    for (const MachineBasicBlock *Succ : BB->successors()) {
      for (const auto &LI : Succ->liveins()) {
        if (!Uses.contains(LI.PhysReg))
          Uses.insert(PhysRegSUOper(&ExitSU, -1, LI.PhysReg));
      }
    }
  }
}

/// MO is an operand of SU's instruction that defines a physical register. Adds
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = MF.getSubtarget();

  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Uses.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
      SUnit *UseSU = I->SU;
      if (UseSU == SU)
        continue;

      // Adjust the dependence latency using operand def/use information,
      // then allow the target to perform its own adjustments.
      int UseOp = I->OpIdx;
      MachineInstr *RegUse = nullptr;
      SDep Dep;
      if (UseOp < 0)
        Dep = SDep(SU, SDep::Artificial);
      else {
        // Set hasPhysRegDefs only for physreg defs that have a use within
        // the scheduling region.
        SU->hasPhysRegDefs = true;
        Dep = SDep(SU, SDep::Data, *Alias);
        RegUse = UseSU->getInstr();
      }
      Dep.setLatency(
        SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, RegUse,
                                         UseOp));

      ST.adjustSchedDependency(SU, UseSU, Dep);
      UseSU->addPred(Dep);
    }
  }
}

/// \brief Adds register dependencies (data, anti, and output) from this SUnit
/// to following instructions in the same scheduling region that depend on the
/// physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);
  unsigned Reg = MO.getReg();
  // We do not need to track any dependencies for constant registers.
  if (MRI.isConstantPhysReg(Reg))
    return;

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
    if (!Defs.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
      SUnit *DefSU = I->SU;
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias));
        else {
          SDep Dep(SU, Kind, /*Reg=*/*Alias);
          Dep.setLatency(
            SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
          DefSU->addPred(Dep);
        }
      }
    }
  }

  if (!MO.isDef()) {
    SU->hasPhysRegUses = true;
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
346 Uses.insert(PhysRegSUOper(SU, OperIdx, Reg)); 347 if (RemoveKillFlags) 348 MO.setIsKill(false); 349 } else { 350 addPhysRegDataDeps(SU, OperIdx); 351 352 // clear this register's use list 353 if (Uses.contains(Reg)) 354 Uses.eraseAll(Reg); 355 356 if (!MO.isDead()) { 357 Defs.eraseAll(Reg); 358 } else if (SU->isCall) { 359 // Calls will not be reordered because of chain dependencies (see 360 // below). Since call operands are dead, calls may continue to be added 361 // to the DefList making dependence checking quadratic in the size of 362 // the block. Instead, we leave only one call at the back of the 363 // DefList. 364 Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg); 365 Reg2SUnitsMap::iterator B = P.first; 366 Reg2SUnitsMap::iterator I = P.second; 367 for (bool isBegin = I == B; !isBegin; /* empty */) { 368 isBegin = (--I) == B; 369 if (!I->SU->isCall) 370 break; 371 I = Defs.erase(I); 372 } 373 } 374 375 // Defs are pushed in the order they are visited and never reordered. 376 Defs.insert(PhysRegSUOper(SU, OperIdx, Reg)); 377 } 378 } 379 380 LaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const 381 { 382 unsigned Reg = MO.getReg(); 383 // No point in tracking lanemasks if we don't have interesting subregisters. 384 const TargetRegisterClass &RC = *MRI.getRegClass(Reg); 385 if (!RC.HasDisjunctSubRegs) 386 return LaneBitmask::getAll(); 387 388 unsigned SubReg = MO.getSubReg(); 389 if (SubReg == 0) 390 return RC.getLaneMask(); 391 return TRI->getSubRegIndexLaneMask(SubReg); 392 } 393 394 /// Adds register output and data dependencies from this SUnit to instructions 395 /// that occur later in the same scheduling region if they read from or write to 396 /// the virtual register defined at OperIdx. 397 /// 398 /// TODO: Hoist loop induction variable increments. This has to be 399 /// reevaluated. Generally, IV scheduling should be done before coalescing. 400 void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) { 401 MachineInstr *MI = SU->getInstr(); 402 MachineOperand &MO = MI->getOperand(OperIdx); 403 unsigned Reg = MO.getReg(); 404 405 LaneBitmask DefLaneMask; 406 LaneBitmask KillLaneMask; 407 if (TrackLaneMasks) { 408 bool IsKill = MO.getSubReg() == 0 || MO.isUndef(); 409 DefLaneMask = getLaneMaskForMO(MO); 410 // If we have a <read-undef> flag, none of the lane values comes from an 411 // earlier instruction. 412 KillLaneMask = IsKill ? LaneBitmask::getAll() : DefLaneMask; 413 414 // Clear undef flag, we'll re-add it later once we know which subregister 415 // Def is first. 416 MO.setIsUndef(false); 417 } else { 418 DefLaneMask = LaneBitmask::getAll(); 419 KillLaneMask = LaneBitmask::getAll(); 420 } 421 422 if (MO.isDead()) { 423 assert(CurrentVRegUses.find(Reg) == CurrentVRegUses.end() && 424 "Dead defs should have no uses"); 425 } else { 426 // Add data dependence to all uses we found so far. 427 const TargetSubtargetInfo &ST = MF.getSubtarget(); 428 for (VReg2SUnitOperIdxMultiMap::iterator I = CurrentVRegUses.find(Reg), 429 E = CurrentVRegUses.end(); I != E; /*empty*/) { 430 LaneBitmask LaneMask = I->LaneMask; 431 // Ignore uses of other lanes. 
      if ((LaneMask & KillLaneMask).none()) {
        ++I;
        continue;
      }

      if ((LaneMask & DefLaneMask).any()) {
        SUnit *UseSU = I->SU;
        MachineInstr *Use = UseSU->getInstr();
        SDep Dep(SU, SDep::Data, Reg);
        Dep.setLatency(SchedModel.computeOperandLatency(MI, OperIdx, Use,
                                                        I->OperandIndex));
        ST.adjustSchedDependency(SU, UseSU, Dep);
        UseSU->addPred(Dep);
      }

      LaneMask &= ~KillLaneMask;
      // If we found a Def for all lanes of this use, remove it from the list.
      if (LaneMask.any()) {
        I->LaneMask = LaneMask;
        ++I;
      } else
        I = CurrentVRegUses.erase(I);
    }
  }

  // Shortcut: Singly defined vregs do not have output/anti dependencies.
  if (MRI.hasOneDef(Reg))
    return;

  // Add output dependence to the next nearest defs of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  LaneBitmask LaneMask = DefLaneMask;
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for other lanes.
    if ((V2SU.LaneMask & LaneMask).none())
      continue;
    // Add an output dependence.
    SUnit *DefSU = V2SU.SU;
    // Ignore additional defs of the same lanes in one instruction. This can
    // happen because lanemasks are shared for targets with too many
    // subregisters. We also use some representation tricks/hacks where we
    // add super-register defs/uses, to imply that although we only access
    // parts of the reg, we care about the full one.
    if (DefSU == SU)
      continue;
    SDep Dep(SU, SDep::Output, Reg);
    Dep.setLatency(
      SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
    DefSU->addPred(Dep);

    // Update current definition. This can get tricky if the def was about a
    // bigger lanemask before. We then have to shrink it and create a new
    // VReg2SUnit for the non-overlapping part.
    LaneBitmask OverlapMask = V2SU.LaneMask & LaneMask;
    LaneBitmask NonOverlapMask = V2SU.LaneMask & ~LaneMask;
    V2SU.SU = SU;
    V2SU.LaneMask = OverlapMask;
    if (NonOverlapMask.any())
      CurrentVRegDefs.insert(VReg2SUnit(Reg, NonOverlapMask, DefSU));
  }
  // If there was no CurrentVRegDefs entry for some lanes yet, create one.
  if (LaneMask.any())
    CurrentVRegDefs.insert(VReg2SUnit(Reg, LaneMask, SU));
}

/// \brief Adds a register data dependency if the instruction that defines the
/// virtual register used at OperIdx is mapped to an SUnit. Adds a register
/// antidependency from this SUnit to instructions that occur later in the same
/// scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  const MachineOperand &MO = MI->getOperand(OperIdx);
  unsigned Reg = MO.getReg();

  // Remember the use. Data dependencies will be added when we find the def.
  LaneBitmask LaneMask = TrackLaneMasks ? getLaneMaskForMO(MO)
                                        : LaneBitmask::getAll();
  CurrentVRegUses.insert(VReg2SUnitOperIdx(Reg, LaneMask, OperIdx, SU));

  // Add antidependences to the following defs of the vreg.
520 for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg), 521 CurrentVRegDefs.end())) { 522 // Ignore defs for unrelated lanes. 523 LaneBitmask PrevDefLaneMask = V2SU.LaneMask; 524 if ((PrevDefLaneMask & LaneMask).none()) 525 continue; 526 if (V2SU.SU == SU) 527 continue; 528 529 V2SU.SU->addPred(SDep(SU, SDep::Anti, Reg)); 530 } 531 } 532 533 /// Returns true if MI is an instruction we are unable to reason about 534 /// (like a call or something with unmodeled side effects). 535 static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) { 536 return MI->isCall() || MI->hasUnmodeledSideEffects() || 537 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad(AA)); 538 } 539 540 /// Returns true if the two MIs need a chain edge between them. 541 /// This is called on normal stores and loads. 542 static bool MIsNeedChainEdge(AliasAnalysis *AA, MachineInstr *MIa, 543 MachineInstr *MIb) { 544 const MachineFunction *MF = MIa->getParent()->getParent(); 545 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); 546 547 assert ((MIa->mayStore() || MIb->mayStore()) && 548 "Dependency checked between two loads"); 549 550 // Let the target decide if memory accesses cannot possibly overlap. 551 if (TII->areMemAccessesTriviallyDisjoint(*MIa, *MIb, AA)) 552 return false; 553 554 // To this point analysis is generic. From here on we do need AA. 555 if (!AA) 556 return true; 557 558 // FIXME: Need to handle multiple memory operands to support all targets. 559 if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand()) 560 return true; 561 562 MachineMemOperand *MMOa = *MIa->memoperands_begin(); 563 MachineMemOperand *MMOb = *MIb->memoperands_begin(); 564 565 if (!MMOa->getValue() || !MMOb->getValue()) 566 return true; 567 568 // The following interface to AA is fashioned after DAGCombiner::isAlias 569 // and operates with MachineMemOperand offset with some important 570 // assumptions: 571 // - LLVM fundamentally assumes flat address spaces. 572 // - MachineOperand offset can *only* result from legalization and 573 // cannot affect queries other than the trivial case of overlap 574 // checking. 575 // - These offsets never wrap and never step outside 576 // of allocated objects. 577 // - There should never be any negative offsets here. 578 // 579 // FIXME: Modify API to hide this math from "user" 580 // FIXME: Even before we go to AA we can reason locally about some 581 // memory objects. It can save compile time, and possibly catch some 582 // corner cases not currently covered. 583 584 assert ((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset"); 585 assert ((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset"); 586 587 int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset()); 588 int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset; 589 int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset; 590 591 AliasResult AAResult = 592 AA->alias(MemoryLocation(MMOa->getValue(), Overlapa, 593 UseTBAA ? MMOa->getAAInfo() : AAMDNodes()), 594 MemoryLocation(MMOb->getValue(), Overlapb, 595 UseTBAA ? 
MMOb->getAAInfo() : AAMDNodes())); 596 597 return (AAResult != NoAlias); 598 } 599 600 void ScheduleDAGInstrs::addChainDependency (SUnit *SUa, SUnit *SUb, 601 unsigned Latency) { 602 if (MIsNeedChainEdge(AAForDep, SUa->getInstr(), SUb->getInstr())) { 603 SDep Dep(SUa, SDep::MayAliasMem); 604 Dep.setLatency(Latency); 605 SUb->addPred(Dep); 606 } 607 } 608 609 /// \brief Creates an SUnit for each real instruction, numbered in top-down 610 /// topological order. The instruction order A < B, implies that no edge exists 611 /// from B to A. 612 /// 613 /// Map each real instruction to its SUnit. 614 /// 615 /// After initSUnits, the SUnits vector cannot be resized and the scheduler may 616 /// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs 617 /// instead of pointers. 618 /// 619 /// MachineScheduler relies on initSUnits numbering the nodes by their order in 620 /// the original instruction list. 621 void ScheduleDAGInstrs::initSUnits() { 622 // We'll be allocating one SUnit for each real instruction in the region, 623 // which is contained within a basic block. 624 SUnits.reserve(NumRegionInstrs); 625 626 for (MachineInstr &MI : llvm::make_range(RegionBegin, RegionEnd)) { 627 if (MI.isDebugValue()) 628 continue; 629 630 SUnit *SU = newSUnit(&MI); 631 MISUnitMap[&MI] = SU; 632 633 SU->isCall = MI.isCall(); 634 SU->isCommutable = MI.isCommutable(); 635 636 // Assign the Latency field of SU using target-provided information. 637 SU->Latency = SchedModel.computeInstrLatency(SU->getInstr()); 638 639 // If this SUnit uses a reserved or unbuffered resource, mark it as such. 640 // 641 // Reserved resources block an instruction from issuing and stall the 642 // entire pipeline. These are identified by BufferSize=0. 643 // 644 // Unbuffered resources prevent execution of subsequent instructions that 645 // require the same resources. This is used for in-order execution pipelines 646 // within an out-of-order core. These are identified by BufferSize=1. 647 if (SchedModel.hasInstrSchedModel()) { 648 const MCSchedClassDesc *SC = getSchedClass(SU); 649 for (const MCWriteProcResEntry &PRE : 650 make_range(SchedModel.getWriteProcResBegin(SC), 651 SchedModel.getWriteProcResEnd(SC))) { 652 switch (SchedModel.getProcResource(PRE.ProcResourceIdx)->BufferSize) { 653 case 0: 654 SU->hasReservedResource = true; 655 break; 656 case 1: 657 SU->isUnbuffered = true; 658 break; 659 default: 660 break; 661 } 662 } 663 } 664 } 665 } 666 667 class ScheduleDAGInstrs::Value2SUsMap : public MapVector<ValueType, SUList> { 668 /// Current total number of SUs in map. 669 unsigned NumNodes; 670 671 /// 1 for loads, 0 for stores. (see comment in SUList) 672 unsigned TrueMemOrderLatency; 673 674 public: 675 Value2SUsMap(unsigned lat = 0) : NumNodes(0), TrueMemOrderLatency(lat) {} 676 677 /// To keep NumNodes up to date, insert() is used instead of 678 /// this operator w/ push_back(). 679 ValueType &operator[](const SUList &Key) { 680 llvm_unreachable("Don't use. Use insert() instead."); }; 681 682 /// Adds SU to the SUList of V. If Map grows huge, reduce its size by calling 683 /// reduce(). 684 void inline insert(SUnit *SU, ValueType V) { 685 MapVector::operator[](V).push_back(SU); 686 NumNodes++; 687 } 688 689 /// Clears the list of SUs mapped to V. 
690 void inline clearList(ValueType V) { 691 iterator Itr = find(V); 692 if (Itr != end()) { 693 assert (NumNodes >= Itr->second.size()); 694 NumNodes -= Itr->second.size(); 695 696 Itr->second.clear(); 697 } 698 } 699 700 /// Clears map from all contents. 701 void clear() { 702 MapVector<ValueType, SUList>::clear(); 703 NumNodes = 0; 704 } 705 706 unsigned inline size() const { return NumNodes; } 707 708 /// Counts the number of SUs in this map after a reduction. 709 void reComputeSize(void) { 710 NumNodes = 0; 711 for (auto &I : *this) 712 NumNodes += I.second.size(); 713 } 714 715 unsigned inline getTrueMemOrderLatency() const { 716 return TrueMemOrderLatency; 717 } 718 719 void dump(); 720 }; 721 722 void ScheduleDAGInstrs::addChainDependencies(SUnit *SU, 723 Value2SUsMap &Val2SUsMap) { 724 for (auto &I : Val2SUsMap) 725 addChainDependencies(SU, I.second, 726 Val2SUsMap.getTrueMemOrderLatency()); 727 } 728 729 void ScheduleDAGInstrs::addChainDependencies(SUnit *SU, 730 Value2SUsMap &Val2SUsMap, 731 ValueType V) { 732 Value2SUsMap::iterator Itr = Val2SUsMap.find(V); 733 if (Itr != Val2SUsMap.end()) 734 addChainDependencies(SU, Itr->second, 735 Val2SUsMap.getTrueMemOrderLatency()); 736 } 737 738 void ScheduleDAGInstrs::addBarrierChain(Value2SUsMap &map) { 739 assert (BarrierChain != nullptr); 740 741 for (auto &I : map) { 742 SUList &sus = I.second; 743 for (auto *SU : sus) 744 SU->addPredBarrier(BarrierChain); 745 } 746 map.clear(); 747 } 748 749 void ScheduleDAGInstrs::insertBarrierChain(Value2SUsMap &map) { 750 assert (BarrierChain != nullptr); 751 752 // Go through all lists of SUs. 753 for (Value2SUsMap::iterator I = map.begin(), EE = map.end(); I != EE;) { 754 Value2SUsMap::iterator CurrItr = I++; 755 SUList &sus = CurrItr->second; 756 SUList::iterator SUItr = sus.begin(), SUEE = sus.end(); 757 for (; SUItr != SUEE; ++SUItr) { 758 // Stop on BarrierChain or any instruction above it. 759 if ((*SUItr)->NodeNum <= BarrierChain->NodeNum) 760 break; 761 762 (*SUItr)->addPredBarrier(BarrierChain); 763 } 764 765 // Remove also the BarrierChain from list if present. 766 if (SUItr != SUEE && *SUItr == BarrierChain) 767 SUItr++; 768 769 // Remove all SUs that are now successors of BarrierChain. 770 if (SUItr != sus.begin()) 771 sus.erase(sus.begin(), SUItr); 772 } 773 774 // Remove all entries with empty su lists. 775 map.remove_if([&](std::pair<ValueType, SUList> &mapEntry) { 776 return (mapEntry.second.empty()); }); 777 778 // Recompute the size of the map (NumNodes). 779 map.reComputeSize(); 780 } 781 782 void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, 783 RegPressureTracker *RPTracker, 784 PressureDiffs *PDiffs, 785 LiveIntervals *LIS, 786 bool TrackLaneMasks) { 787 const TargetSubtargetInfo &ST = MF.getSubtarget(); 788 bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI 789 : ST.useAA(); 790 AAForDep = UseAA ? AA : nullptr; 791 792 BarrierChain = nullptr; 793 794 this->TrackLaneMasks = TrackLaneMasks; 795 MISUnitMap.clear(); 796 ScheduleDAG::clearDAG(); 797 798 // Create an SUnit for each real instruction. 799 initSUnits(); 800 801 if (PDiffs) 802 PDiffs->init(SUnits.size()); 803 804 // We build scheduling units by walking a block's instruction list 805 // from bottom to top. 806 807 // Each MIs' memory operand(s) is analyzed to a list of underlying 808 // objects. The SU is then inserted in the SUList(s) mapped from the 809 // Value(s). Each Value thus gets mapped to lists of SUs depending 810 // on it, stores and loads kept separately. 
Two SUs are trivially
  // non-aliasing if they both depend on only identified Values and do
  // not share any common Value.
  Value2SUsMap Stores, Loads(1 /*TrueMemOrderLatency*/);

  // Certain memory accesses are known to not alias any SU in Stores
  // or Loads, and therefore have their own 'NonAlias' domain. E.g.
  // spill / reload instructions never alias LLVM IR Values. It would
  // be nice to assume that this type of memory access always has
  // proper memory operand modelling, and is therefore never
  // unanalyzable, but this is conservatively not done.
  Value2SUsMap NonAliasStores, NonAliasLoads(1 /*TrueMemOrderLatency*/);

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = nullptr;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(CurrentVRegDefs.empty() && "nobody else should use CurrentVRegDefs");
  assert(CurrentVRegUses.empty() && "nobody else should use CurrentVRegUses");
  unsigned NumVirtRegs = MRI.getNumVirtRegs();
  CurrentVRegDefs.setUniverse(NumVirtRegs);
  CurrentVRegUses.setUniverse(NumVirtRegs);

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  addSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
  MachineInstr *DbgMI = nullptr;
  for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
       MII != MIE; --MII) {
    MachineInstr &MI = *std::prev(MII);
    if (DbgMI) {
      DbgValues.push_back(std::make_pair(DbgMI, &MI));
      DbgMI = nullptr;
    }

    if (MI.isDebugValue()) {
      DbgMI = &MI;
      continue;
    }
    SUnit *SU = MISUnitMap[&MI];
    assert(SU && "No SUnit mapped to this MI");

    if (RPTracker) {
      RegisterOperands RegOpers;
      RegOpers.collect(MI, *TRI, MRI, TrackLaneMasks, false);
      if (TrackLaneMasks) {
        SlotIndex SlotIdx = LIS->getInstructionIndex(MI);
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx);
      }
      if (PDiffs != nullptr)
        PDiffs->addInstruction(SU->NodeNum, RegOpers, MRI);

      RPTracker->recedeSkipDebugValues();
      assert(&*RPTracker->getPos() == &MI && "RPTracker in sync");
      RPTracker->recede(RegOpers);
    }

    assert(
        (CanHandleTerminators || (!MI.isTerminator() && !MI.isPosition())) &&
        "Cannot schedule terminators or labels!");

    // Add register-based dependencies (data, anti, and output).
    // For some instructions (calls, returns, inline-asm, etc.) there can
    // be explicit uses and implicit defs, in which case the use will appear
    // on the operand list before the def. Do two passes over the operand
    // list to make sure that defs are processed before any uses.
    bool HasVRegDef = false;
    for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI.getOperand(j);
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
        addPhysRegDeps(SU, j);
      } else if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        HasVRegDef = true;
        addVRegDefDeps(SU, j);
      }
    }
    // Now process all uses.
    for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI.getOperand(j);
      // Only look at use operands.
      // We do not need to check for MO.readsReg() here because subsequent
      // subregister defs will get output dependence edges and need no
      // additional use dependencies.
      if (!MO.isReg() || !MO.isUse())
        continue;
      unsigned Reg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
        addPhysRegDeps(SU, j);
      } else if (TargetRegisterInfo::isVirtualRegister(Reg) && MO.readsReg()) {
        addVRegUseDeps(SU, j);
      }
    }

    // If we haven't seen any uses in this scheduling region, create a
    // dependence edge to ExitSU to model the live-out latency. This is
    // required for vreg defs with no in-region use, and prefetches with
    // no vreg def.
    //
    // FIXME: NumDataSuccs would be more precise than NumSuccs here. This
    // check currently relies on being called before adding chain deps.
    if (SU->NumSuccs == 0 && SU->Latency > 1 && (HasVRegDef || MI.mayLoad())) {
      SDep Dep(SU, SDep::Artificial);
      Dep.setLatency(SU->Latency - 1);
      ExitSU.addPred(Dep);
    }

    // Add memory dependencies (Note: isStoreToStackSlot and
    // isLoadFromStackSlot are not usable after stack slots are lowered to
    // actual addresses).

    // This is a barrier event that acts as a pivotal node in the DAG.
    if (isGlobalMemoryObject(AA, &MI)) {

      // Become the barrier chain.
      if (BarrierChain)
        BarrierChain->addPredBarrier(SU);
      BarrierChain = SU;

      DEBUG(dbgs() << "Global memory object and new barrier chain: SU("
            << BarrierChain->NodeNum << ").\n";);

      // Add dependencies against everything below it and clear maps.
      addBarrierChain(Stores);
      addBarrierChain(Loads);
      addBarrierChain(NonAliasStores);
      addBarrierChain(NonAliasLoads);

      continue;
    }

    // If it's not a store or a variant load, we're done.
    if (!MI.mayStore() &&
        !(MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA)))
      continue;

    // Always add a dependency edge to BarrierChain if present.
    if (BarrierChain)
      BarrierChain->addPredBarrier(SU);

    // Find the underlying objects for MI. The Objs vector is either
    // empty, or filled with the Values of memory locations which this
    // SU depends on. An empty vector means the memory location is
    // unknown, and may alias anything.
    UnderlyingObjectsVector Objs;
    getUnderlyingObjectsForInstr(&MI, MFI, Objs, MF.getDataLayout());

    if (MI.mayStore()) {
      if (Objs.empty()) {
        // An unknown store depends on all stores and loads.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);
        addChainDependencies(SU, Loads);
        addChainDependencies(SU, NonAliasLoads);

        // Map this store to 'UnknownValue'.
        Stores.insert(SU, UnknownValue);
      } else {
        // Add precise dependencies against all previously seen memory
        // accesses mapped to the same Value(s).
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add dependencies to previous stores and loads mapped to V.
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);
          addChainDependencies(SU, (ThisMayAlias ? Loads : NonAliasLoads), V);
        }
        // Update the store map after all chains have been added to avoid
        // adding a self-loop edge if multiple underlying objects are present.
990 for (const UnderlyingObject &UnderlObj : Objs) { 991 ValueType V = UnderlObj.getValue(); 992 bool ThisMayAlias = UnderlObj.mayAlias(); 993 994 // Map this store to V. 995 (ThisMayAlias ? Stores : NonAliasStores).insert(SU, V); 996 } 997 // The store may have dependencies to unanalyzable loads and 998 // stores. 999 addChainDependencies(SU, Loads, UnknownValue); 1000 addChainDependencies(SU, Stores, UnknownValue); 1001 } 1002 } else { // SU is a load. 1003 if (Objs.empty()) { 1004 // An unknown load depends on all stores. 1005 addChainDependencies(SU, Stores); 1006 addChainDependencies(SU, NonAliasStores); 1007 1008 Loads.insert(SU, UnknownValue); 1009 } else { 1010 for (const UnderlyingObject &UnderlObj : Objs) { 1011 ValueType V = UnderlObj.getValue(); 1012 bool ThisMayAlias = UnderlObj.mayAlias(); 1013 1014 // Add precise dependencies against all previously seen stores 1015 // mapping to the same Value(s). 1016 addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V); 1017 1018 // Map this load to V. 1019 (ThisMayAlias ? Loads : NonAliasLoads).insert(SU, V); 1020 } 1021 // The load may have dependencies to unanalyzable stores. 1022 addChainDependencies(SU, Stores, UnknownValue); 1023 } 1024 } 1025 1026 // Reduce maps if they grow huge. 1027 if (Stores.size() + Loads.size() >= HugeRegion) { 1028 DEBUG(dbgs() << "Reducing Stores and Loads maps.\n";); 1029 reduceHugeMemNodeMaps(Stores, Loads, getReductionSize()); 1030 } 1031 if (NonAliasStores.size() + NonAliasLoads.size() >= HugeRegion) { 1032 DEBUG(dbgs() << "Reducing NonAliasStores and NonAliasLoads maps.\n";); 1033 reduceHugeMemNodeMaps(NonAliasStores, NonAliasLoads, getReductionSize()); 1034 } 1035 } 1036 1037 if (DbgMI) 1038 FirstDbgValue = DbgMI; 1039 1040 Defs.clear(); 1041 Uses.clear(); 1042 CurrentVRegDefs.clear(); 1043 CurrentVRegUses.clear(); 1044 } 1045 1046 raw_ostream &llvm::operator<<(raw_ostream &OS, const PseudoSourceValue* PSV) { 1047 PSV->printCustom(OS); 1048 return OS; 1049 } 1050 1051 void ScheduleDAGInstrs::Value2SUsMap::dump() { 1052 for (auto &Itr : *this) { 1053 if (Itr.first.is<const Value*>()) { 1054 const Value *V = Itr.first.get<const Value*>(); 1055 if (isa<UndefValue>(V)) 1056 dbgs() << "Unknown"; 1057 else 1058 V->printAsOperand(dbgs()); 1059 } 1060 else if (Itr.first.is<const PseudoSourceValue*>()) 1061 dbgs() << Itr.first.get<const PseudoSourceValue*>(); 1062 else 1063 llvm_unreachable("Unknown Value type."); 1064 1065 dbgs() << " : "; 1066 dumpSUList(Itr.second); 1067 } 1068 } 1069 1070 void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores, 1071 Value2SUsMap &loads, unsigned N) { 1072 DEBUG(dbgs() << "Before reduction:\nStoring SUnits:\n"; 1073 stores.dump(); 1074 dbgs() << "Loading SUnits:\n"; 1075 loads.dump()); 1076 1077 // Insert all SU's NodeNums into a vector and sort it. 1078 std::vector<unsigned> NodeNums; 1079 NodeNums.reserve(stores.size() + loads.size()); 1080 for (auto &I : stores) 1081 for (auto *SU : I.second) 1082 NodeNums.push_back(SU->NodeNum); 1083 for (auto &I : loads) 1084 for (auto *SU : I.second) 1085 NodeNums.push_back(SU->NodeNum); 1086 std::sort(NodeNums.begin(), NodeNums.end()); 1087 1088 // The N last elements in NodeNums will be removed, and the SU with 1089 // the lowest NodeNum of them will become the new BarrierChain to 1090 // let the not yet seen SUs have a dependency to the removed SUs. 
1091 assert (N <= NodeNums.size()); 1092 SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)]; 1093 if (BarrierChain) { 1094 // The aliasing and non-aliasing maps reduce independently of each 1095 // other, but share a common BarrierChain. Check if the 1096 // newBarrierChain is above the former one. If it is not, it may 1097 // introduce a loop to use newBarrierChain, so keep the old one. 1098 if (newBarrierChain->NodeNum < BarrierChain->NodeNum) { 1099 BarrierChain->addPredBarrier(newBarrierChain); 1100 BarrierChain = newBarrierChain; 1101 DEBUG(dbgs() << "Inserting new barrier chain: SU(" 1102 << BarrierChain->NodeNum << ").\n";); 1103 } 1104 else 1105 DEBUG(dbgs() << "Keeping old barrier chain: SU(" 1106 << BarrierChain->NodeNum << ").\n";); 1107 } 1108 else 1109 BarrierChain = newBarrierChain; 1110 1111 insertBarrierChain(stores); 1112 insertBarrierChain(loads); 1113 1114 DEBUG(dbgs() << "After reduction:\nStoring SUnits:\n"; 1115 stores.dump(); 1116 dbgs() << "Loading SUnits:\n"; 1117 loads.dump()); 1118 } 1119 1120 void ScheduleDAGInstrs::startBlockForKills(MachineBasicBlock *BB) { 1121 // Start with no live registers. 1122 LiveRegs.reset(); 1123 1124 // Examine the live-in regs of all successors. 1125 for (const MachineBasicBlock *Succ : BB->successors()) { 1126 for (const auto &LI : Succ->liveins()) { 1127 // Repeat, for reg and all subregs. 1128 for (MCSubRegIterator SubRegs(LI.PhysReg, TRI, /*IncludeSelf=*/true); 1129 SubRegs.isValid(); ++SubRegs) 1130 LiveRegs.set(*SubRegs); 1131 } 1132 } 1133 } 1134 1135 /// \brief If we change a kill flag on the bundle instruction implicit register 1136 /// operands, then we also need to propagate that to any instructions inside 1137 /// the bundle which had the same kill state. 1138 static void toggleBundleKillFlag(MachineInstr *MI, unsigned Reg, 1139 bool NewKillState, 1140 const TargetRegisterInfo *TRI) { 1141 if (MI->getOpcode() != TargetOpcode::BUNDLE) 1142 return; 1143 1144 // Walk backwards from the last instruction in the bundle to the first. 1145 // Once we set a kill flag on an instruction, we bail out, as otherwise we 1146 // might set it on too many operands. We will clear as many flags as we 1147 // can though. 1148 MachineBasicBlock::instr_iterator Begin = MI->getIterator(); 1149 MachineBasicBlock::instr_iterator End = getBundleEnd(Begin); 1150 while (Begin != End) { 1151 if (NewKillState) { 1152 if ((--End)->addRegisterKilled(Reg, TRI, /* addIfNotFound= */ false)) 1153 return; 1154 } else 1155 (--End)->clearRegisterKills(Reg, TRI); 1156 } 1157 } 1158 1159 void ScheduleDAGInstrs::toggleKillFlag(MachineInstr &MI, MachineOperand &MO) { 1160 if (MO.isDebug()) 1161 return; 1162 1163 // Setting kill flag... 1164 if (!MO.isKill()) { 1165 MO.setIsKill(true); 1166 toggleBundleKillFlag(&MI, MO.getReg(), true, TRI); 1167 return; 1168 } 1169 1170 // If MO itself is live, clear the kill flag... 1171 if (LiveRegs.test(MO.getReg())) { 1172 MO.setIsKill(false); 1173 toggleBundleKillFlag(&MI, MO.getReg(), false, TRI); 1174 return; 1175 } 1176 1177 // If any subreg of MO is live, then create an imp-def for that 1178 // subreg and keep MO marked as killed. 
1179 MO.setIsKill(false); 1180 toggleBundleKillFlag(&MI, MO.getReg(), false, TRI); 1181 bool AllDead = true; 1182 const unsigned SuperReg = MO.getReg(); 1183 MachineInstrBuilder MIB(MF, &MI); 1184 for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) { 1185 if (LiveRegs.test(*SubRegs)) { 1186 MIB.addReg(*SubRegs, RegState::ImplicitDefine); 1187 AllDead = false; 1188 } 1189 } 1190 1191 if(AllDead) { 1192 MO.setIsKill(true); 1193 toggleBundleKillFlag(&MI, MO.getReg(), true, TRI); 1194 } 1195 } 1196 1197 void ScheduleDAGInstrs::fixupKills(MachineBasicBlock *MBB) { 1198 // FIXME: Reuse the LivePhysRegs utility for this. 1199 DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n'); 1200 1201 LiveRegs.resize(TRI->getNumRegs()); 1202 BitVector killedRegs(TRI->getNumRegs()); 1203 1204 startBlockForKills(MBB); 1205 1206 // Examine block from end to start... 1207 unsigned Count = MBB->size(); 1208 for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin(); 1209 I != E; --Count) { 1210 MachineInstr &MI = *--I; 1211 if (MI.isDebugValue()) 1212 continue; 1213 1214 // Update liveness. Registers that are defed but not used in this 1215 // instruction are now dead. Mark register and all subregs as they 1216 // are completely defined. 1217 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 1218 MachineOperand &MO = MI.getOperand(i); 1219 if (MO.isRegMask()) 1220 LiveRegs.clearBitsNotInMask(MO.getRegMask()); 1221 if (!MO.isReg()) continue; 1222 unsigned Reg = MO.getReg(); 1223 if (Reg == 0) continue; 1224 if (!MO.isDef()) continue; 1225 // Ignore two-addr defs. 1226 if (MI.isRegTiedToUseOperand(i)) continue; 1227 1228 // Repeat for reg and all subregs. 1229 for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true); 1230 SubRegs.isValid(); ++SubRegs) 1231 LiveRegs.reset(*SubRegs); 1232 } 1233 1234 // Examine all used registers and set/clear kill flag. When a 1235 // register is used multiple times we only set the kill flag on 1236 // the first use. Don't set kill flags on undef operands. 1237 killedRegs.reset(); 1238 1239 // toggleKillFlag can append new operands (implicit defs), so using 1240 // a range-based loop is not safe. The new operands will be appended 1241 // at the end of the operand list and they don't need to be visited, 1242 // so iterating until the currently last operand is ok. 1243 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 1244 MachineOperand &MO = MI.getOperand(i); 1245 if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue; 1246 unsigned Reg = MO.getReg(); 1247 if ((Reg == 0) || MRI.isReserved(Reg)) continue; 1248 1249 bool kill = false; 1250 if (!killedRegs.test(Reg)) { 1251 kill = true; 1252 // A register is not killed if any subregs are live... 
        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
          if (LiveRegs.test(*SubRegs)) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed if it became
        // live in this instruction.
        if (kill)
          kill = !LiveRegs.test(Reg);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        toggleKillFlag(MI, MO);
        DEBUG(MI.dump());
        DEBUG({
          if (MI.getOpcode() == TargetOpcode::BUNDLE) {
            MachineBasicBlock::instr_iterator Begin = MI.getIterator();
            MachineBasicBlock::instr_iterator End = getBundleEnd(Begin);
            while (++Begin != End)
              DEBUG(Begin->dump());
          }
        });
      }

      killedRegs.set(Reg);
    }

    // Mark any used register (that is not undef) and its subregs as
    // now live...
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  // Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  SU->getInstr()->dump();
#endif
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss, /*SkipOpers=*/true);
  return oss.str();
}

/// Return the basic block label. It is not necessarily unique because a block
/// may contain multiple scheduling regions. But it is fine for visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
  return "dag." + BB->getFullName();
}

//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//

namespace llvm {
/// Internal state used to compute SchedDFSResult.
class SchedDFSImpl {
  SchedDFSResult &R;

  /// Join DAG nodes into equivalence classes by their subtree.
  IntEqClasses SubtreeClasses;
  /// List PredSU, SuccSU pairs that represent data edges between subtrees.
  std::vector<std::pair<const SUnit*, const SUnit*> > ConnectionPairs;

  struct RootData {
    unsigned NodeID;
    unsigned ParentNodeID;  ///< Parent node (member of the parent subtree).
    unsigned SubInstrCount; ///< Instr count in this tree only, not children.

    RootData(unsigned id): NodeID(id),
                           ParentNodeID(SchedDFSResult::InvalidSubtreeID),
                           SubInstrCount(0) {}

    unsigned getSparseSetIndex() const { return NodeID; }
  };

  SparseSet<RootData> RootSet;

public:
  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
    RootSet.setUniverse(R.DFSNodeData.size());
  }

  /// Returns true if this node has been visited by the DFS traversal.
  ///
  /// During visitPostorderNode the Node's SubtreeID is assigned to the Node
  /// ID. Later, SubtreeID is updated but remains valid.
1359 bool isVisited(const SUnit *SU) const { 1360 return R.DFSNodeData[SU->NodeNum].SubtreeID 1361 != SchedDFSResult::InvalidSubtreeID; 1362 } 1363 1364 /// Initializes this node's instruction count. We don't need to flag the node 1365 /// visited until visitPostorder because the DAG cannot have cycles. 1366 void visitPreorder(const SUnit *SU) { 1367 R.DFSNodeData[SU->NodeNum].InstrCount = 1368 SU->getInstr()->isTransient() ? 0 : 1; 1369 } 1370 1371 /// Called once for each node after all predecessors are visited. Revisit this 1372 /// node's predecessors and potentially join them now that we know the ILP of 1373 /// the other predecessors. 1374 void visitPostorderNode(const SUnit *SU) { 1375 // Mark this node as the root of a subtree. It may be joined with its 1376 // successors later. 1377 R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum; 1378 RootData RData(SU->NodeNum); 1379 RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1; 1380 1381 // If any predecessors are still in their own subtree, they either cannot be 1382 // joined or are large enough to remain separate. If this parent node's 1383 // total instruction count is not greater than a child subtree by at least 1384 // the subtree limit, then try to join it now since splitting subtrees is 1385 // only useful if multiple high-pressure paths are possible. 1386 unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount; 1387 for (const SDep &PredDep : SU->Preds) { 1388 if (PredDep.getKind() != SDep::Data) 1389 continue; 1390 unsigned PredNum = PredDep.getSUnit()->NodeNum; 1391 if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit) 1392 joinPredSubtree(PredDep, SU, /*CheckLimit=*/false); 1393 1394 // Either link or merge the TreeData entry from the child to the parent. 1395 if (R.DFSNodeData[PredNum].SubtreeID == PredNum) { 1396 // If the predecessor's parent is invalid, this is a tree edge and the 1397 // current node is the parent. 1398 if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID) 1399 RootSet[PredNum].ParentNodeID = SU->NodeNum; 1400 } 1401 else if (RootSet.count(PredNum)) { 1402 // The predecessor is not a root, but is still in the root set. This 1403 // must be the new parent that it was just joined to. Note that 1404 // RootSet[PredNum].ParentNodeID may either be invalid or may still be 1405 // set to the original parent. 1406 RData.SubInstrCount += RootSet[PredNum].SubInstrCount; 1407 RootSet.erase(PredNum); 1408 } 1409 } 1410 RootSet[SU->NodeNum] = RData; 1411 } 1412 1413 /// \brief Called once for each tree edge after calling visitPostOrderNode on 1414 /// the predecessor. Increment the parent node's instruction count and 1415 /// preemptively join this subtree to its parent's if it is small enough. 1416 void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) { 1417 R.DFSNodeData[Succ->NodeNum].InstrCount 1418 += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount; 1419 joinPredSubtree(PredDep, Succ); 1420 } 1421 1422 /// Adds a connection for cross edges. 1423 void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) { 1424 ConnectionPairs.push_back(std::make_pair(PredDep.getSUnit(), Succ)); 1425 } 1426 1427 /// Sets each node's subtree ID to the representative ID and record 1428 /// connections between trees. 
1429 void finalize() { 1430 SubtreeClasses.compress(); 1431 R.DFSTreeData.resize(SubtreeClasses.getNumClasses()); 1432 assert(SubtreeClasses.getNumClasses() == RootSet.size() 1433 && "number of roots should match trees"); 1434 for (const RootData &Root : RootSet) { 1435 unsigned TreeID = SubtreeClasses[Root.NodeID]; 1436 if (Root.ParentNodeID != SchedDFSResult::InvalidSubtreeID) 1437 R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[Root.ParentNodeID]; 1438 R.DFSTreeData[TreeID].SubInstrCount = Root.SubInstrCount; 1439 // Note that SubInstrCount may be greater than InstrCount if we joined 1440 // subtrees across a cross edge. InstrCount will be attributed to the 1441 // original parent, while SubInstrCount will be attributed to the joined 1442 // parent. 1443 } 1444 R.SubtreeConnections.resize(SubtreeClasses.getNumClasses()); 1445 R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses()); 1446 DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n"); 1447 for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) { 1448 R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx]; 1449 DEBUG(dbgs() << " SU(" << Idx << ") in tree " 1450 << R.DFSNodeData[Idx].SubtreeID << '\n'); 1451 } 1452 for (const std::pair<const SUnit*, const SUnit*> &P : ConnectionPairs) { 1453 unsigned PredTree = SubtreeClasses[P.first->NodeNum]; 1454 unsigned SuccTree = SubtreeClasses[P.second->NodeNum]; 1455 if (PredTree == SuccTree) 1456 continue; 1457 unsigned Depth = P.first->getDepth(); 1458 addConnection(PredTree, SuccTree, Depth); 1459 addConnection(SuccTree, PredTree, Depth); 1460 } 1461 } 1462 1463 protected: 1464 /// Joins the predecessor subtree with the successor that is its DFS parent. 1465 /// Applies some heuristics before joining. 1466 bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ, 1467 bool CheckLimit = true) { 1468 assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges"); 1469 1470 // Check if the predecessor is already joined. 1471 const SUnit *PredSU = PredDep.getSUnit(); 1472 unsigned PredNum = PredSU->NodeNum; 1473 if (R.DFSNodeData[PredNum].SubtreeID != PredNum) 1474 return false; 1475 1476 // Four is the magic number of successors before a node is considered a 1477 // pinch point. 1478 unsigned NumDataSucs = 0; 1479 for (const SDep &SuccDep : PredSU->Succs) { 1480 if (SuccDep.getKind() == SDep::Data) { 1481 if (++NumDataSucs >= 4) 1482 return false; 1483 } 1484 } 1485 if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit) 1486 return false; 1487 R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum; 1488 SubtreeClasses.join(Succ->NodeNum, PredNum); 1489 return true; 1490 } 1491 1492 /// Called by finalize() to record a connection between trees. 1493 void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) { 1494 if (!Depth) 1495 return; 1496 1497 do { 1498 SmallVectorImpl<SchedDFSResult::Connection> &Connections = 1499 R.SubtreeConnections[FromTree]; 1500 for (SchedDFSResult::Connection &C : Connections) { 1501 if (C.TreeID == ToTree) { 1502 C.Level = std::max(C.Level, Depth); 1503 return; 1504 } 1505 } 1506 Connections.push_back(SchedDFSResult::Connection(ToTree, Depth)); 1507 FromTree = R.DFSTreeData[FromTree].ParentTreeID; 1508 } while (FromTree != SchedDFSResult::InvalidSubtreeID); 1509 } 1510 }; 1511 } // end namespace llvm 1512 1513 namespace { 1514 /// Manage the stack used by a reverse depth-first search over the DAG. 
class SchedDAGReverseDFS {
  std::vector<std::pair<const SUnit*, SUnit::const_pred_iterator> > DFSStack;
public:
  bool isComplete() const { return DFSStack.empty(); }

  void follow(const SUnit *SU) {
    DFSStack.push_back(std::make_pair(SU, SU->Preds.begin()));
  }
  void advance() { ++DFSStack.back().second; }

  const SDep *backtrack() {
    DFSStack.pop_back();
    return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second);
  }

  const SUnit *getCurr() const { return DFSStack.back().first; }

  SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }

  SUnit::const_pred_iterator getPredEnd() const {
    return getCurr()->Preds.end();
  }
};
} // end anonymous namespace

static bool hasDataSucc(const SUnit *SU) {
  for (const SDep &SuccDep : SU->Succs) {
    if (SuccDep.getKind() == SDep::Data &&
        !SuccDep.getSUnit()->isBoundaryNode())
      return true;
  }
  return false;
}

/// Computes an ILP metric for all nodes in the subDAG reachable via depth-first
/// search from this root.
void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
  if (!IsBottomUp)
    llvm_unreachable("Top-down ILP metric is unimplemented");

  SchedDFSImpl Impl(*this);
  for (const SUnit &SU : SUnits) {
    if (Impl.isVisited(&SU) || hasDataSucc(&SU))
      continue;

    SchedDAGReverseDFS DFS;
    Impl.visitPreorder(&SU);
    DFS.follow(&SU);
    for (;;) {
      // Traverse the leftmost path as far as possible.
      while (DFS.getPred() != DFS.getPredEnd()) {
        const SDep &PredDep = *DFS.getPred();
        DFS.advance();
        // Ignore non-data edges.
        if (PredDep.getKind() != SDep::Data
            || PredDep.getSUnit()->isBoundaryNode()) {
          continue;
        }
        // An already visited edge is a cross edge, assuming an acyclic DAG.
        if (Impl.isVisited(PredDep.getSUnit())) {
          Impl.visitCrossEdge(PredDep, DFS.getCurr());
          continue;
        }
        Impl.visitPreorder(PredDep.getSUnit());
        DFS.follow(PredDep.getSUnit());
      }
      // Visit the top of the stack in postorder and backtrack.
      const SUnit *Child = DFS.getCurr();
      const SDep *PredDep = DFS.backtrack();
      Impl.visitPostorderNode(Child);
      if (PredDep)
        Impl.visitPostorderEdge(*PredDep, DFS.getCurr());
      if (DFS.isComplete())
        break;
    }
  }
  Impl.finalize();
}

/// The root of the given SubtreeID was just scheduled. For all subtrees
/// connected to this tree, record the depth of the connection so that the
/// nearest connected subtrees can be prioritized.
1597 void SchedDFSResult::scheduleTree(unsigned SubtreeID) { 1598 for (const Connection &C : SubtreeConnections[SubtreeID]) { 1599 SubtreeConnectLevels[C.TreeID] = 1600 std::max(SubtreeConnectLevels[C.TreeID], C.Level); 1601 DEBUG(dbgs() << " Tree: " << C.TreeID 1602 << " @" << SubtreeConnectLevels[C.TreeID] << '\n'); 1603 } 1604 } 1605 1606 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1607 LLVM_DUMP_METHOD void ILPValue::print(raw_ostream &OS) const { 1608 OS << InstrCount << " / " << Length << " = "; 1609 if (!Length) 1610 OS << "BADILP"; 1611 else 1612 OS << format("%g", ((double)InstrCount / Length)); 1613 } 1614 1615 LLVM_DUMP_METHOD void ILPValue::dump() const { 1616 dbgs() << *this << '\n'; 1617 } 1618 1619 namespace llvm { 1620 1621 LLVM_DUMP_METHOD 1622 raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) { 1623 Val.print(OS); 1624 return OS; 1625 } 1626 1627 } // end namespace llvm 1628 #endif 1629