//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "misched"

static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
    cl::ZeroOrMore, cl::init(false),
    cl::desc("Enable use of AA during MI DAG construction"));

static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden,
    cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"));

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo *mli,
                                     bool IsPostRAFlag, bool RemoveKillFlags,
                                     LiveIntervals *lis)
    : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()), LIS(lis),
      IsPostRA(IsPostRAFlag), RemoveKillFlags(RemoveKillFlags),
      CanHandleTerminators(false), FirstDbgValue(nullptr) {
  assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
  DbgValues.clear();
  assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
         "Virtual registers must be removed prior to PostRA scheduling");

  const TargetSubtargetInfo &ST = mf.getSubtarget();
  SchedModel.init(ST.getSchedModel(), &ST, TII);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant, a multiplied value, or a phi, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
           !isa<PHINode>(U->getOperand(1))))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}

/// getUnderlyingObjects - This is a wrapper around GetUnderlyingObjects
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static void getUnderlyingObjects(const Value *V,
                                 SmallVectorImpl<Value *> &Objects,
                                 const DataLayout &DL) {
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 4> Working(1, V);
  do {
    V = Working.pop_back_val();

    SmallVector<Value *, 4> Objs;
    GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);

    for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
         I != IE; ++I) {
      V = *I;
      if (!Visited.insert(V).second)
        continue;
      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
        const Value *O =
          getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
        if (O->getType()->isPointerTy()) {
          Working.push_back(O);
          continue;
        }
      }
      Objects.push_back(const_cast<Value *>(V));
    }
  } while (!Working.empty());
}

typedef PointerUnion<const Value *, const PseudoSourceValue *> ValueType;
typedef SmallVector<PointerIntPair<ValueType, 1, bool>, 4>
UnderlyingObjectsVector;

/// getUnderlyingObjectsForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object.
static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
                                         const MachineFrameInfo *MFI,
                                         UnderlyingObjectsVector &Objects,
                                         const DataLayout &DL) {
  if (!MI->hasOneMemOperand() ||
      (!(*MI->memoperands_begin())->getValue() &&
       !(*MI->memoperands_begin())->getPseudoValue()) ||
      (*MI->memoperands_begin())->isVolatile())
    return;

  if (const PseudoSourceValue *PSV =
      (*MI->memoperands_begin())->getPseudoValue()) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (!PSV->isAliased(MFI)) {
      bool MayAlias = PSV->mayAlias(MFI);
      Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias));
    }
    return;
  }

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return;

  SmallVector<Value *, 4> Objs;
  getUnderlyingObjects(V, Objs, DL);

  for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
       I != IE; ++I) {
    V = *I;

    if (!isIdentifiedObject(V)) {
      Objects.clear();
      return;
    }

    Objects.push_back(UnderlyingObjectsVector::value_type(V, true));
  }
}

void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
  BB = bb;
}

void ScheduleDAGInstrs::finishBlock() {
  // Subclasses should no longer refer to the old block.
  BB = nullptr;
}

/// Initialize the DAG and common scheduler state for the current scheduling
/// region. This does not actually create the DAG, only clears it. The
/// scheduling driver may call BuildSchedGraph multiple times per scheduling
/// region.
void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs) {
  assert(bb == BB && "startBlock should set BB");
  RegionBegin = begin;
  RegionEnd = end;
  NumRegionInstrs = regioninstrs;
}

/// Close the current scheduling region. Don't clear any state in case the
/// driver wants to refer to the previous scheduling region.
void ScheduleDAGInstrs::exitRegion() {
  // Nothing to do.
}

/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::addSchedBarrierDeps() {
  MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : nullptr;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->isCall() || ExitMI->isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      else {
        assert(!IsPostRA && "Virtual register encountered after regalloc.");
        if (MO.readsReg()) // ignore undef operands
          addVRegUseDeps(&ExitSU, i);
      }
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    assert(Uses.empty() && "Uses in set before adding deps?");
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (!Uses.contains(Reg))
          Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      }
  }
}

/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = MF.getSubtarget();

  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Uses.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
      SUnit *UseSU = I->SU;
      if (UseSU == SU)
        continue;

      // Adjust the dependence latency using operand def/use information,
      // then allow the target to perform its own adjustments.
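      // A use recorded with operand index -1 comes from addSchedBarrierDeps
      // (the ExitSU); it is modeled below as an artificial edge rather than a
      // data edge.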
      int UseOp = I->OpIdx;
      MachineInstr *RegUse = nullptr;
      SDep Dep;
      if (UseOp < 0)
        Dep = SDep(SU, SDep::Artificial);
      else {
        // Set the hasPhysRegDefs only for physreg defs that have a use within
        // the scheduling region.
        SU->hasPhysRegDefs = true;
        Dep = SDep(SU, SDep::Data, *Alias);
        RegUse = UseSU->getInstr();
      }
      Dep.setLatency(
        SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, RegUse,
                                         UseOp));

      ST.adjustSchedDependency(SU, UseSU, Dep);
      UseSU->addPred(Dep);
    }
  }
}

/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
/// this SUnit to following instructions in the same scheduling region that
/// depend on the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Defs.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
      SUnit *DefSU = I->SU;
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias));
        else {
          SDep Dep(SU, Kind, /*Reg=*/*Alias);
          Dep.setLatency(
            SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
          DefSU->addPred(Dep);
        }
      }
    }
  }

  if (!MO.isDef()) {
    SU->hasPhysRegUses = true;
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    Uses.insert(PhysRegSUOper(SU, OperIdx, MO.getReg()));
    if (RemoveKillFlags)
      MO.setIsKill(false);
  }
  else {
    addPhysRegDataDeps(SU, OperIdx);
    unsigned Reg = MO.getReg();

    // clear this register's use list
    if (Uses.contains(Reg))
      Uses.eraseAll(Reg);

    if (!MO.isDead()) {
      Defs.eraseAll(Reg);
    } else if (SU->isCall) {
      // Calls will not be reordered because of chain dependencies (see
      // below). Since call operands are dead, calls may continue to be added
      // to the DefList making dependence checking quadratic in the size of
      // the block. Instead, we leave only one call at the back of the
      // DefList.
      Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg);
      Reg2SUnitsMap::iterator B = P.first;
      Reg2SUnitsMap::iterator I = P.second;
      for (bool isBegin = I == B; !isBegin; /* empty */) {
        isBegin = (--I) == B;
        if (!I->SU->isCall)
          break;
        I = Defs.erase(I);
      }
    }

    // Defs are pushed in the order they are visited and never reordered.
    Defs.insert(PhysRegSUOper(SU, OperIdx, Reg));
  }
}

/// addVRegDefDeps - Add register output and data dependencies from this SUnit
/// to instructions that occur later in the same scheduling region if they read
/// from or write to the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  // Singly defined vregs do not have output/anti dependencies.
  // The current operand is a def, so we have at least one.
  // Check here if there are any others...
  if (MRI.hasOneDef(Reg))
    return;

  // Add output dependence to the next nearest def of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
  if (DefI == VRegDefs.end())
    VRegDefs.insert(VReg2SUnit(Reg, SU));
  else {
    SUnit *DefSU = DefI->SU;
    if (DefSU != SU && DefSU != &ExitSU) {
      SDep Dep(SU, SDep::Output, Reg);
      Dep.setLatency(
        SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
      DefSU->addPred(Dep);
    }
    DefI->SU = SU;
  }
}

/// addVRegUseDeps - Add a register data dependency if the instruction that
/// defines the virtual register used at OperIdx is mapped to an SUnit. Add a
/// register antidependency from this SUnit to instructions that occur later in
/// the same scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  // Record this local VReg use.
  VReg2UseMap::iterator UI = VRegUses.find(Reg);
  for (; UI != VRegUses.end(); ++UI) {
    if (UI->SU == SU)
      break;
  }
  if (UI == VRegUses.end())
    VRegUses.insert(VReg2SUnit(Reg, SU));

  // Lookup this operand's reaching definition.
  assert(LIS && "vreg dependencies requires LiveIntervals");
  LiveQueryResult LRQ
    = LIS->getInterval(Reg).Query(LIS->getInstructionIndex(MI));
  VNInfo *VNI = LRQ.valueIn();

  // VNI will be valid because MachineOperand::readsReg() is checked by caller.
  assert(VNI && "No value to read by operand");
  MachineInstr *Def = LIS->getInstructionFromIndex(VNI->def);
  // Phis and other noninstructions (after coalescing) have a NULL Def.
  if (Def) {
    SUnit *DefSU = getSUnit(Def);
    if (DefSU) {
      // The reaching Def lives within this scheduling region.
      // Create a data dependence.
      SDep dep(DefSU, SDep::Data, Reg);
      // Adjust the dependence latency using operand def/use information, then
      // allow the target to perform its own adjustments.
      int DefOp = Def->findRegisterDefOperandIdx(Reg);
      dep.setLatency(SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx));

      const TargetSubtargetInfo &ST = MF.getSubtarget();
      ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
      SU->addPred(dep);
    }
  }

  // Add antidependence to the following def of the vreg it uses.
  VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
  if (DefI != VRegDefs.end() && DefI->SU != SU)
    DefI->SU->addPred(SDep(SU, SDep::Anti, Reg));
}

/// Return true if MI is an instruction we are unable to reason about
/// (like a call or something with unmodeled side effects).
static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) {
  if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
      (MI->hasOrderedMemoryRef() &&
       (!MI->mayLoad() || !MI->isInvariantLoad(AA))))
    return true;
  return false;
}

// This MI might have either incomplete info, or be known to be unsafe
// to deal with (i.e. a volatile object).
static inline bool isUnsafeMemoryObject(MachineInstr *MI,
                                        const MachineFrameInfo *MFI,
                                        const DataLayout &DL) {
  if (!MI || MI->memoperands_empty())
    return true;
  // We purposefully do not check for hasOneMemOperand() here
  // in the hope of triggering an assert downstream in order to
  // finish the implementation.
  if ((*MI->memoperands_begin())->isVolatile() ||
      MI->hasUnmodeledSideEffects())
    return true;

  if ((*MI->memoperands_begin())->getPseudoValue()) {
    // Similarly to getUnderlyingObjectForInstr:
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    return true;
  }

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return true;

  SmallVector<Value *, 4> Objs;
  getUnderlyingObjects(V, Objs, DL);
  for (SmallVectorImpl<Value *>::iterator I = Objs.begin(),
         IE = Objs.end(); I != IE; ++I) {
    // Does this pointer refer to a distinct and identifiable object?
    if (!isIdentifiedObject(*I))
      return true;
  }

  return false;
}

/// This returns true if the two MIs need a chain edge between them.
/// If these are not even memory operations, we still may need
/// chain deps between them. The question really is - could
/// these two MIs be reordered during scheduling from a memory dependency
/// point of view?
static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                             const DataLayout &DL, MachineInstr *MIa,
                             MachineInstr *MIb) {
  const MachineFunction *MF = MIa->getParent()->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Cover a trivial case - no edge is needed from an instruction to itself.
  if (MIa == MIb)
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if ((MIa->mayLoad() || MIa->mayStore()) &&
      (MIb->mayLoad() || MIb->mayStore()))
    if (TII->areMemAccessesTriviallyDisjoint(MIa, MIb, AA))
      return false;

  // FIXME: Need to handle multiple memory operands to support all targets.
  if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
    return true;

  if (isUnsafeMemoryObject(MIa, MFI, DL) || isUnsafeMemoryObject(MIb, MFI, DL))
    return true;

  // If we are dealing with two "normal" loads, we do not need an edge
  // between them - they could be reordered.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // Up to this point the analysis is generic. From here on we do need AA.
  if (!AA)
    return true;

  MachineMemOperand *MMOa = *MIa->memoperands_begin();
  MachineMemOperand *MMOb = *MIb->memoperands_begin();

  if (!MMOa->getValue() || !MMOb->getValue())
    return true;

  // The following interface to AA is fashioned after DAGCombiner::isAlias
  // and operates with MachineMemOperand offset with some important
  // assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap
  //     checking.
  //   - These offsets never wrap and never step outside
  //     of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // FIXME: Even before we go to AA we can reason locally about some
  // memory objects. It can save compile time, and possibly catch some
  // corner cases not currently covered.

  assert ((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset");
  assert ((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset");

  int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset());
  int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset;
  int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;

  AliasAnalysis::AliasResult AAResult = AA->alias(
      AliasAnalysis::Location(MMOa->getValue(), Overlapa,
                              UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      AliasAnalysis::Location(MMOb->getValue(), Overlapb,
                              UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));

  return (AAResult != AliasAnalysis::NoAlias);
}

/// This recursive function iterates over chain deps of SUb looking for the
/// "latest" node that needs a chain edge to SUa.
static unsigned iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                                 const DataLayout &DL, SUnit *SUa, SUnit *SUb,
                                 SUnit *ExitSU, unsigned *Depth,
                                 SmallPtrSetImpl<const SUnit *> &Visited) {
  if (!SUa || !SUb || SUb == ExitSU)
    return *Depth;

  // Remember visited nodes.
  if (!Visited.insert(SUb).second)
    return *Depth;
  // If there is _some_ dependency already in place, do not
  // descend any further.
  // TODO: Need to make sure that if that dependency got eliminated or ignored
  // for any reason in the future, we would not violate DAG topology.
  // Currently it does not happen, but makes an implicit assumption about
  // future implementation.
  //
  // Independently, if we encounter a node that is some sort of global
  // object (like a call) we already have a full set of dependencies to it
  // and we can stop descending.
  if (SUa->isSucc(SUb) ||
      isGlobalMemoryObject(AA, SUb->getInstr()))
    return *Depth;

  // If we do need an edge, or we have exceeded the depth budget,
  // add that edge to the predecessors chain of SUb,
  // and stop descending.
  if (*Depth > 200 ||
      MIsNeedChainEdge(AA, MFI, DL, SUa->getInstr(), SUb->getInstr())) {
    SUb->addPred(SDep(SUa, SDep::MayAliasMem));
    return *Depth;
  }
  // Track current depth.
  (*Depth)++;
  // Iterate over memory dependencies only.
  for (SUnit::const_succ_iterator I = SUb->Succs.begin(), E = SUb->Succs.end();
       I != E; ++I)
    if (I->isNormalMemoryOrBarrier())
      iterateChainSucc(AA, MFI, DL, SUa, I->getSUnit(), ExitSU, Depth, Visited);
  return *Depth;
}

/// This function assumes that "downward" from SU there exists a
/// tail/leaf of the already constructed DAG. It iterates downward and
/// checks whether SU may alias any node dominated
/// by it.
static void adjustChainDeps(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                            const DataLayout &DL, SUnit *SU, SUnit *ExitSU,
                            std::set<SUnit *> &CheckList,
                            unsigned LatencyToLoad) {
  if (!SU)
    return;

  SmallPtrSet<const SUnit*, 16> Visited;
  unsigned Depth = 0;

  for (std::set<SUnit *>::iterator I = CheckList.begin(), IE = CheckList.end();
       I != IE; ++I) {
    if (SU == *I)
      continue;
    if (MIsNeedChainEdge(AA, MFI, DL, SU->getInstr(), (*I)->getInstr())) {
      SDep Dep(SU, SDep::MayAliasMem);
      Dep.setLatency(((*I)->getInstr()->mayLoad()) ? LatencyToLoad : 0);
      (*I)->addPred(Dep);
    }

    // Iterate recursively over all previously added memory chain
    // successors. Keep track of visited nodes.
    for (SUnit::const_succ_iterator J = (*I)->Succs.begin(),
           JE = (*I)->Succs.end(); J != JE; ++J)
      if (J->isNormalMemoryOrBarrier())
        iterateChainSucc(AA, MFI, DL, SU, J->getSUnit(), ExitSU, &Depth,
                         Visited);
  }
}

/// Check whether two objects need a chain edge; if so, add it,
/// otherwise remember the rejected SU.
static inline void addChainDependency(AliasAnalysis *AA,
                                      const MachineFrameInfo *MFI,
                                      const DataLayout &DL, SUnit *SUa,
                                      SUnit *SUb, std::set<SUnit *> &RejectList,
                                      unsigned TrueMemOrderLatency = 0,
                                      bool isNormalMemory = false) {
  // If this is a false dependency,
  // do not add the edge, but remember the rejected node.
  if (MIsNeedChainEdge(AA, MFI, DL, SUa->getInstr(), SUb->getInstr())) {
    SDep Dep(SUa, isNormalMemory ? SDep::MayAliasMem : SDep::Barrier);
    Dep.setLatency(TrueMemOrderLatency);
    SUb->addPred(Dep);
  }
  else {
    // Duplicate entries should be ignored.
    RejectList.insert(SUb);
    DEBUG(dbgs() << "\tReject chain dep between SU("
          << SUa->NodeNum << ") and SU("
          << SUb->NodeNum << ")\n");
  }
}

/// Create an SUnit for each real instruction, numbered in top-down topological
/// order. The instruction order A < B implies that no edge exists from B to A.
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
/// instead of pointers.
///
/// MachineScheduler relies on initSUnits numbering the nodes by their order in
/// the original instruction list.
void ScheduleDAGInstrs::initSUnits() {
  // We'll be allocating one SUnit for each real instruction in the region,
  // which is contained within a basic block.
  SUnits.reserve(NumRegionInstrs);

  for (MachineBasicBlock::iterator I = RegionBegin; I != RegionEnd; ++I) {
    MachineInstr *MI = I;
    if (MI->isDebugValue())
      continue;

    SUnit *SU = newSUnit(MI);
    MISUnitMap[MI] = SU;

    SU->isCall = MI->isCall();
    SU->isCommutable = MI->isCommutable();

    // Assign the Latency field of SU using target-provided information.
    SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());

    // If this SUnit uses a reserved or unbuffered resource, mark it as such.
    //
    // Reserved resources block an instruction from issuing and stall the
    // entire pipeline. These are identified by BufferSize=0.
    //
    // Unbuffered resources prevent execution of subsequent instructions that
    // require the same resources. This is used for in-order execution pipelines
    // within an out-of-order core. These are identified by BufferSize=1.
    if (SchedModel.hasInstrSchedModel()) {
      const MCSchedClassDesc *SC = getSchedClass(SU);
      for (TargetSchedModel::ProcResIter
             PI = SchedModel.getWriteProcResBegin(SC),
             PE = SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) {
        switch (SchedModel.getProcResource(PI->ProcResourceIdx)->BufferSize) {
        case 0:
          SU->hasReservedResource = true;
          break;
        case 1:
          SU->isUnbuffered = true;
          break;
        default:
          break;
        }
      }
    }
  }
}

/// If RegPressure is non-null, compute register pressure as a side effect. The
/// DAG builder is an efficient place to do it because it already visits
/// operands.
void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
                                        RegPressureTracker *RPTracker,
                                        PressureDiffs *PDiffs) {
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
                                                       : ST.useAA();
  AliasAnalysis *AAForDep = UseAA ? AA : nullptr;

  MISUnitMap.clear();
  ScheduleDAG::clearDAG();

  // Create an SUnit for each real instruction.
  initSUnits();

  if (PDiffs)
    PDiffs->init(SUnits.size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = nullptr, *AliasChain = nullptr;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  MapVector<ValueType, std::vector<SUnit *> > AliasMemDefs, NonAliasMemDefs;
  MapVector<ValueType, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
  std::set<SUnit*> RejectMemNodes;

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = nullptr;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs");
  VRegUses.clear();
  VRegDefs.setUniverse(MRI.getNumVirtRegs());
  VRegUses.setUniverse(MRI.getNumVirtRegs());

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  addSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
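  // DbgMI remembers the most recently visited DBG_VALUE so it can be paired
  // with the first real instruction found above it in the block.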
  MachineInstr *DbgMI = nullptr;
  for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
       MII != MIE; --MII) {
    MachineInstr *MI = std::prev(MII);
    if (MI && DbgMI) {
      DbgValues.push_back(std::make_pair(DbgMI, MI));
      DbgMI = nullptr;
    }

    if (MI->isDebugValue()) {
      DbgMI = MI;
      continue;
    }
    SUnit *SU = MISUnitMap[MI];
    assert(SU && "No SUnit mapped to this MI");

    if (RPTracker) {
      PressureDiff *PDiff = PDiffs ? &(*PDiffs)[SU->NodeNum] : nullptr;
      RPTracker->recede(/*LiveUses=*/nullptr, PDiff);
      assert(RPTracker->getPos() == std::prev(MII) &&
             "RPTracker can't find MI");
    }

    assert(
        (CanHandleTerminators || (!MI->isTerminator() && !MI->isPosition())) &&
        "Cannot schedule terminators or labels!");

    // Add register-based dependencies (data, anti, and output).
    bool HasVRegDef = false;
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        addPhysRegDeps(SU, j);
      else {
        assert(!IsPostRA && "Virtual register encountered!");
        if (MO.isDef()) {
          HasVRegDef = true;
          addVRegDefDeps(SU, j);
        }
        else if (MO.readsReg()) // ignore undef operands
          addVRegUseDeps(SU, j);
      }
    }
    // If we haven't seen any uses in this scheduling region, create a
    // dependence edge to ExitSU to model the live-out latency. This is required
    // for vreg defs with no in-region use, and prefetches with no vreg def.
    //
    // FIXME: NumDataSuccs would be more precise than NumSuccs here. This
    // check currently relies on being called before adding chain deps.
    if (SU->NumSuccs == 0 && SU->Latency > 1
        && (HasVRegDef || MI->mayLoad())) {
      SDep Dep(SU, SDep::Artificial);
      Dep.setLatency(SU->Latency - 1);
      ExitSU.addPred(Dep);
    }

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass)
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
    unsigned TrueMemOrderLatency = MI->mayStore() ? 1 : 0;
    if (isGlobalMemoryObject(AA, MI)) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
          I->second[i]->addPred(SDep(SU, SDep::Barrier));
        }
      }
      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
          SDep Dep(SU, SDep::Barrier);
          Dep.setLatency(TrueMemOrderLatency);
          I->second[i]->addPred(Dep);
        }
      }
      // Add SU to the barrier chain.
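      // Instructions visited later in this bottom-up walk (i.e. earlier in
      // program order) will be chained to this SU through Barrier edges.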
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Barrier));
      BarrierChain = SU;
      // This is a barrier event that acts as a pivotal node in the DAG,
      // so it is safe to clear the list of exposed nodes.
      adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
                      TrueMemOrderLatency);
      RejectMemNodes.clear();
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain) {
        unsigned ChainLatency = 0;
        if (AliasChain->getInstr()->mayLoad())
          ChainLatency = TrueMemOrderLatency;
        addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
                           RejectMemNodes, ChainLatency);
      }
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                           PendingLoads[k], RejectMemNodes,
                           TrueMemOrderLatency);
      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
             AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                             I->second[i], RejectMemNodes);
      }
      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                             I->second[i], RejectMemNodes, TrueMemOrderLatency);
      }
      adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
                      TrueMemOrderLatency);
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (MI->mayStore()) {
      // Add dependence on barrier chain, if needed.
      // There is no point in checking aliasing on a barrier event. Even if
      // SU and the barrier _could_ be reordered, they should not. In addition,
      // we have lost all RejectMemNodes below the barrier.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Barrier));

      UnderlyingObjectsVector Objs;
      getUnderlyingObjectsForInstr(MI, MFI, Objs, *TM.getDataLayout());

      if (Objs.empty()) {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      bool MayAlias = false;
      for (UnderlyingObjectsVector::iterator K = Objs.begin(), KE = Objs.end();
           K != KE; ++K) {
        ValueType V = K->getPointer();
        bool ThisMayAlias = K->getInt();
        if (ThisMayAlias)
          MayAlias = true;

        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        MapVector<ValueType, std::vector<SUnit *> >::iterator I =
          ((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        MapVector<ValueType, std::vector<SUnit *> >::iterator IE =
          ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          for (unsigned i = 0, e = I->second.size(); i != e; ++i)
            addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                               I->second[i], RejectMemNodes, 0, true);

          // If we're not using AA, then we only need one store per object.
          if (!AAForDep)
            I->second.clear();
          I->second.push_back(SU);
        } else {
          if (ThisMayAlias) {
            if (!AAForDep)
              AliasMemDefs[V].clear();
            AliasMemDefs[V].push_back(SU);
          } else {
            if (!AAForDep)
              NonAliasMemDefs[V].clear();
            NonAliasMemDefs[V].push_back(SU);
          }
        }
        // Handle the uses in MemUses, if there are any.
        MapVector<ValueType, std::vector<SUnit *> >::iterator J =
          ((ThisMayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        MapVector<ValueType, std::vector<SUnit *> >::iterator JE =
          ((ThisMayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                               J->second[i], RejectMemNodes,
                               TrueMemOrderLatency, true);
          J->second.clear();
        }
      }
      if (MayAlias) {
        // Add dependencies from all the PendingLoads, i.e. loads
        // with no underlying object.
        for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
          addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                             PendingLoads[k], RejectMemNodes,
                             TrueMemOrderLatency);
        // Add dependence on alias chain, if needed.
        if (AliasChain)
          addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
                             RejectMemNodes);
      }
      adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
                      TrueMemOrderLatency);
    } else if (MI->mayLoad()) {
      bool MayAlias = true;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        UnderlyingObjectsVector Objs;
        getUnderlyingObjectsForInstr(MI, MFI, Objs, *TM.getDataLayout());

        if (Objs.empty()) {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            for (unsigned i = 0, e = I->second.size(); i != e; ++i)
              addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                                 I->second[i], RejectMemNodes);

          PendingLoads.push_back(SU);
          MayAlias = true;
        } else {
          MayAlias = false;
        }

        for (UnderlyingObjectsVector::iterator
               J = Objs.begin(), JE = Objs.end(); J != JE; ++J) {
          ValueType V = J->getPointer();
          bool ThisMayAlias = J->getInt();

          if (ThisMayAlias)
            MayAlias = true;

          // A load from a specific PseudoSourceValue. Add precise dependencies.
          MapVector<ValueType, std::vector<SUnit *> >::iterator I =
            ((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          MapVector<ValueType, std::vector<SUnit *> >::iterator IE =
            ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            for (unsigned i = 0, e = I->second.size(); i != e; ++i)
              addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                                 I->second[i], RejectMemNodes, 0, true);
          if (ThisMayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        }
        if (MayAlias)
          adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU,
                          RejectMemNodes, /*Latency=*/0);
        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
                             RejectMemNodes);
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Barrier));
      }
    }
  }
  if (DbgMI)
    FirstDbgValue = DbgMI;

  Defs.clear();
  Uses.clear();
  VRegDefs.clear();
  PendingLoads.clear();
}

/// \brief Initialize register live-range state for updating kills.
void ScheduleDAGInstrs::startBlockForKills(MachineBasicBlock *BB) {
  // Start with no live registers.
  LiveRegs.reset();

  // Examine the live-in regs of all successors.
  for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
       SE = BB->succ_end(); SI != SE; ++SI) {
    for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
         E = (*SI)->livein_end(); I != E; ++I) {
      unsigned Reg = *I;
      // Repeat, for reg and all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

/// \brief If we change a kill flag on the bundle instruction's implicit
/// register operands, then we also need to propagate that to any instructions
/// inside the bundle which had the same kill state.
static void toggleBundleKillFlag(MachineInstr *MI, unsigned Reg,
                                 bool NewKillState) {
  if (MI->getOpcode() != TargetOpcode::BUNDLE)
    return;

  // Walk backwards from the last instruction in the bundle to the first.
  // Once we set a kill flag on an instruction, we bail out, as otherwise we
  // might set it on too many operands. We will clear as many flags as we
  // can though.
  MachineBasicBlock::instr_iterator Begin = MI;
  MachineBasicBlock::instr_iterator End = getBundleEnd(MI);
  while (Begin != End) {
    for (MIOperands MO(--End); MO.isValid(); ++MO) {
      if (!MO->isReg() || MO->isDef() || Reg != MO->getReg())
        continue;

      // If the register has the internal flag then it could be killing an
      // internal def of the register. In this case, just skip. We only want
      // to toggle the flag on operands visible outside the bundle.
      if (MO->isInternalRead())
        continue;

      if (MO->isKill() == NewKillState)
        continue;
      MO->setIsKill(NewKillState);
      if (NewKillState)
        return;
    }
  }
}

bool ScheduleDAGInstrs::toggleKillFlag(MachineInstr *MI, MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    toggleBundleKillFlag(MI, MO.getReg(), true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (LiveRegs.test(MO.getReg())) {
    MO.setIsKill(false);
    toggleBundleKillFlag(MI, MO.getReg(), false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
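  // Clear the kill flag first; it is restored below only if all subregisters
  // turn out to be dead.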
  MO.setIsKill(false);
  toggleBundleKillFlag(MI, MO.getReg(), false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  MachineInstrBuilder MIB(MF, MI);
  for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
    if (LiveRegs.test(*SubRegs)) {
      MIB.addReg(*SubRegs, RegState::ImplicitDefine);
      AllDead = false;
    }
  }

  if (AllDead) {
    MO.setIsKill(true);
    toggleBundleKillFlag(MI, MO.getReg(), true);
  }
  return false;
}

// FIXME: Reuse the LivePhysRegs utility for this.
void ScheduleDAGInstrs::fixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  LiveRegs.resize(TRI->getNumRegs());
  BitVector killedRegs(TRI->getNumRegs());

  startBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegMask())
        LiveRegs.clearBitsNotInMask(MO.getRegMask());
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      // Repeat for reg and all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.reset(*SubRegs);
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use. Don't set kill flags on undef operands.
    killedRegs.reset();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      bool kill = false;
      if (!killedRegs.test(Reg)) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
          if (LiveRegs.test(*SubRegs)) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction
        if (kill)
          kill = !LiveRegs.test(Reg);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: toggleKillFlag may invalidate MO.
        toggleKillFlag(MI, MO);
        DEBUG(MI->dump());
        DEBUG(if (MI->getOpcode() == TargetOpcode::BUNDLE) {
          MachineBasicBlock::instr_iterator Begin = MI;
          MachineBasicBlock::instr_iterator End = getBundleEnd(MI);
          while (++Begin != End)
            DEBUG(Begin->dump());
        });
      }

      killedRegs.set(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  SU->getInstr()->dump();
#endif
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss, /*SkipOpers=*/true);
  return oss.str();
}

/// Return the basic block label. It is not necessarily unique because a block
/// contains multiple scheduling regions. But it is fine for visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
  return "dag." + BB->getFullName();
}

//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//

namespace llvm {
/// \brief Internal state used to compute SchedDFSResult.
class SchedDFSImpl {
  SchedDFSResult &R;

  /// Join DAG nodes into equivalence classes by their subtree.
  IntEqClasses SubtreeClasses;
  /// List PredSU, SuccSU pairs that represent data edges between subtrees.
  std::vector<std::pair<const SUnit*, const SUnit*> > ConnectionPairs;

  struct RootData {
    unsigned NodeID;
    unsigned ParentNodeID;  // Parent node (member of the parent subtree).
    unsigned SubInstrCount; // Instr count in this tree only, not children.

    RootData(unsigned id): NodeID(id),
                           ParentNodeID(SchedDFSResult::InvalidSubtreeID),
                           SubInstrCount(0) {}

    unsigned getSparseSetIndex() const { return NodeID; }
  };

  SparseSet<RootData> RootSet;

public:
  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
    RootSet.setUniverse(R.DFSNodeData.size());
  }

  /// Return true if this node has been visited by the DFS traversal.
  ///
  /// During visitPostorderNode the Node's SubtreeID is assigned to the Node
  /// ID. Later, SubtreeID is updated but remains valid.
  bool isVisited(const SUnit *SU) const {
    return R.DFSNodeData[SU->NodeNum].SubtreeID
      != SchedDFSResult::InvalidSubtreeID;
  }

  /// Initialize this node's instruction count. We don't need to flag the node
  /// visited until visitPostorder because the DAG cannot have cycles.
  void visitPreorder(const SUnit *SU) {
    R.DFSNodeData[SU->NodeNum].InstrCount =
      SU->getInstr()->isTransient() ? 0 : 1;
  }

  /// Called once for each node after all predecessors are visited. Revisit this
  /// node's predecessors and potentially join them now that we know the ILP of
  /// the other predecessors.
  void visitPostorderNode(const SUnit *SU) {
    // Mark this node as the root of a subtree. It may be joined with its
    // successors later.
    R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum;
    RootData RData(SU->NodeNum);
    RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1;

    // If any predecessors are still in their own subtree, they either cannot be
    // joined or are large enough to remain separate. If this parent node's
    // total instruction count is not greater than a child subtree by at least
    // the subtree limit, then try to join it now since splitting subtrees is
    // only useful if multiple high-pressure paths are possible.
    unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount;
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->getKind() != SDep::Data)
        continue;
      unsigned PredNum = PI->getSUnit()->NodeNum;
      if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit)
        joinPredSubtree(*PI, SU, /*CheckLimit=*/false);

      // Either link or merge the TreeData entry from the child to the parent.
      if (R.DFSNodeData[PredNum].SubtreeID == PredNum) {
        // If the predecessor's parent is invalid, this is a tree edge and the
        // current node is the parent.
        if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID)
          RootSet[PredNum].ParentNodeID = SU->NodeNum;
      }
      else if (RootSet.count(PredNum)) {
        // The predecessor is not a root, but is still in the root set. This
        // must be the new parent that it was just joined to. Note that
        // RootSet[PredNum].ParentNodeID may either be invalid or may still be
        // set to the original parent.
        RData.SubInstrCount += RootSet[PredNum].SubInstrCount;
        RootSet.erase(PredNum);
      }
    }
    RootSet[SU->NodeNum] = RData;
  }

  /// Called once for each tree edge after calling visitPostOrderNode on the
  /// predecessor. Increment the parent node's instruction count and
  /// preemptively join this subtree to its parent's if it is small enough.
  void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) {
    R.DFSNodeData[Succ->NodeNum].InstrCount
      += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount;
    joinPredSubtree(PredDep, Succ);
  }

  /// Add a connection for cross edges.
  void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) {
    ConnectionPairs.push_back(std::make_pair(PredDep.getSUnit(), Succ));
  }

  /// Set each node's subtree ID to the representative ID and record connections
  /// between trees.
  void finalize() {
    SubtreeClasses.compress();
    R.DFSTreeData.resize(SubtreeClasses.getNumClasses());
    assert(SubtreeClasses.getNumClasses() == RootSet.size()
           && "number of roots should match trees");
    for (SparseSet<RootData>::const_iterator
           RI = RootSet.begin(), RE = RootSet.end(); RI != RE; ++RI) {
      unsigned TreeID = SubtreeClasses[RI->NodeID];
      if (RI->ParentNodeID != SchedDFSResult::InvalidSubtreeID)
        R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[RI->ParentNodeID];
      R.DFSTreeData[TreeID].SubInstrCount = RI->SubInstrCount;
      // Note that SubInstrCount may be greater than InstrCount if we joined
      // subtrees across a cross edge. InstrCount will be attributed to the
      // original parent, while SubInstrCount will be attributed to the joined
      // parent.
    }
    R.SubtreeConnections.resize(SubtreeClasses.getNumClasses());
    R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses());
    DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
    for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) {
      R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx];
      DEBUG(dbgs() << "  SU(" << Idx << ") in tree "
            << R.DFSNodeData[Idx].SubtreeID << '\n');
    }
    for (std::vector<std::pair<const SUnit*, const SUnit*> >::const_iterator
           I = ConnectionPairs.begin(), E = ConnectionPairs.end();
         I != E; ++I) {
      unsigned PredTree = SubtreeClasses[I->first->NodeNum];
      unsigned SuccTree = SubtreeClasses[I->second->NodeNum];
      if (PredTree == SuccTree)
        continue;
      unsigned Depth = I->first->getDepth();
      addConnection(PredTree, SuccTree, Depth);
      addConnection(SuccTree, PredTree, Depth);
    }
  }

protected:
  /// Join the predecessor subtree with the successor that is its DFS
  /// parent. Apply some heuristics before joining.
  bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ,
                       bool CheckLimit = true) {
    assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges");

    // Check if the predecessor is already joined.
    const SUnit *PredSU = PredDep.getSUnit();
    unsigned PredNum = PredSU->NodeNum;
    if (R.DFSNodeData[PredNum].SubtreeID != PredNum)
      return false;

    // Four is the magic number of successors before a node is considered a
    // pinch point.
    unsigned NumDataSucs = 0;
    for (SUnit::const_succ_iterator SI = PredSU->Succs.begin(),
           SE = PredSU->Succs.end(); SI != SE; ++SI) {
      if (SI->getKind() == SDep::Data) {
        if (++NumDataSucs >= 4)
          return false;
      }
    }
    if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit)
      return false;
    R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum;
    SubtreeClasses.join(Succ->NodeNum, PredNum);
    return true;
  }

  /// Called by finalize() to record a connection between trees.
  void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) {
    if (!Depth)
      return;

    do {
      SmallVectorImpl<SchedDFSResult::Connection> &Connections =
        R.SubtreeConnections[FromTree];
      for (SmallVectorImpl<SchedDFSResult::Connection>::iterator
             I = Connections.begin(), E = Connections.end(); I != E; ++I) {
        if (I->TreeID == ToTree) {
          I->Level = std::max(I->Level, Depth);
          return;
        }
      }
      Connections.push_back(SchedDFSResult::Connection(ToTree, Depth));
      FromTree = R.DFSTreeData[FromTree].ParentTreeID;
    } while (FromTree != SchedDFSResult::InvalidSubtreeID);
  }
};
} // namespace llvm

namespace {
/// \brief Manage the stack used by a reverse depth-first search over the DAG.
class SchedDAGReverseDFS {
  std::vector<std::pair<const SUnit*, SUnit::const_pred_iterator> > DFSStack;
public:
  bool isComplete() const { return DFSStack.empty(); }

  void follow(const SUnit *SU) {
    DFSStack.push_back(std::make_pair(SU, SU->Preds.begin()));
  }
  void advance() { ++DFSStack.back().second; }

  const SDep *backtrack() {
    DFSStack.pop_back();
    return DFSStack.empty() ?
      nullptr : std::prev(DFSStack.back().second);
  }

  const SUnit *getCurr() const { return DFSStack.back().first; }

  SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }

  SUnit::const_pred_iterator getPredEnd() const {
    return getCurr()->Preds.end();
  }
};
} // anonymous namespace

static bool hasDataSucc(const SUnit *SU) {
  for (SUnit::const_succ_iterator
         SI = SU->Succs.begin(), SE = SU->Succs.end(); SI != SE; ++SI) {
    if (SI->getKind() == SDep::Data && !SI->getSUnit()->isBoundaryNode())
      return true;
  }
  return false;
}

/// Compute an ILP metric for all nodes in the subDAG reachable via depth-first
/// search from this root.
void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
  if (!IsBottomUp)
    llvm_unreachable("Top-down ILP metric is unimplemented");

  SchedDFSImpl Impl(*this);
  for (ArrayRef<SUnit>::const_iterator
         SI = SUnits.begin(), SE = SUnits.end(); SI != SE; ++SI) {
    const SUnit *SU = &*SI;
    if (Impl.isVisited(SU) || hasDataSucc(SU))
      continue;

    SchedDAGReverseDFS DFS;
    Impl.visitPreorder(SU);
    DFS.follow(SU);
    for (;;) {
      // Traverse the leftmost path as far as possible.
      while (DFS.getPred() != DFS.getPredEnd()) {
        const SDep &PredDep = *DFS.getPred();
        DFS.advance();
        // Ignore non-data edges.
        if (PredDep.getKind() != SDep::Data
            || PredDep.getSUnit()->isBoundaryNode()) {
          continue;
        }
        // An already visited edge is a cross edge, assuming an acyclic DAG.
        if (Impl.isVisited(PredDep.getSUnit())) {
          Impl.visitCrossEdge(PredDep, DFS.getCurr());
          continue;
        }
        Impl.visitPreorder(PredDep.getSUnit());
        DFS.follow(PredDep.getSUnit());
      }
      // Visit the top of the stack in postorder and backtrack.
      const SUnit *Child = DFS.getCurr();
      const SDep *PredDep = DFS.backtrack();
      Impl.visitPostorderNode(Child);
      if (PredDep)
        Impl.visitPostorderEdge(*PredDep, DFS.getCurr());
      if (DFS.isComplete())
        break;
    }
  }
  Impl.finalize();
}

/// The root of the given SubtreeID was just scheduled. For all subtrees
/// connected to this tree, record the depth of the connection so that the
/// nearest connected subtrees can be prioritized.
void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
  for (SmallVectorImpl<Connection>::const_iterator
         I = SubtreeConnections[SubtreeID].begin(),
         E = SubtreeConnections[SubtreeID].end(); I != E; ++I) {
    SubtreeConnectLevels[I->TreeID] =
      std::max(SubtreeConnectLevels[I->TreeID], I->Level);
    DEBUG(dbgs() << "  Tree: " << I->TreeID
          << " @" << SubtreeConnectLevels[I->TreeID] << '\n');
  }
}

LLVM_DUMP_METHOD
void ILPValue::print(raw_ostream &OS) const {
  OS << InstrCount << " / " << Length << " = ";
  if (!Length)
    OS << "BADILP";
  else
    OS << format("%g", ((double)InstrCount / Length));
}

LLVM_DUMP_METHOD
void ILPValue::dump() const {
  dbgs() << *this << '\n';
}

namespace llvm {

LLVM_DUMP_METHOD
raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
  Val.print(OS);
  return OS;
}

} // namespace llvm