//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt,
                                     bool IsPostRAFlag)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
    InstrItins(mf.getTarget().getInstrItineraryData()), IsPostRA(IsPostRAFlag),
    UnitLatencies(false), Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()),
    LoopRegs(MLI, MDT), FirstDbgValue(0) {
  DbgValues.clear();
  assert(!(IsPostRA && MF.getRegInfo().getNumVirtRegs()) &&
         "Virtual registers must be removed prior to PostRA scheduling");
}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  // Check to see if the scheduler cares about latencies.
  UnitLatencies = ForceUnitLatencies();

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}

/// getUnderlyingObject - This is a wrapper around GetUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = GetUnderlyingObject(V);
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!O->getType()->isPointerTy())
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  LoopRegs.Deps.clear();
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch())
      LoopRegs.VisitLoop(ML);
}

/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->isCall() || ExitMI->isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        Uses[Reg].push_back(&ExitSU);
      else
        assert(!IsPostRA && "Virtual register encountered after regalloc.");
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    SmallSet<unsigned, 8> Seen;
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (Seen.insert(Reg))
          Uses[Reg].push_back(&ExitSU);
      }
  }
}

/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
/// this SUnit to following instructions in the same scheduling region that
/// depend on the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  const MachineOperand &MO = MI->getOperand(OperIdx);
  unsigned Reg = MO.getReg();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (const unsigned *Alias = TRI->getOverlaps(Reg); *Alias; ++Alias) {
    std::vector<SUnit *> &DefList = Defs[*Alias];
    for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
      SUnit *DefSU = DefList[i];
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias));
        else {
          unsigned AOLat = TII->getOutputLatency(InstrItins, MI, OperIdx,
                                                 DefSU->getInstr());
          DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias));
        }
      }
    }
  }

  // Retrieve the UseList to add data dependencies and update uses.
  std::vector<SUnit *> &UseList = Uses[Reg];
  if (MO.isDef()) {
    // Update DefList. Defs are pushed in the order they are visited and
    // never reordered.
    std::vector<SUnit *> &DefList = Defs[Reg];

    // Add any data dependencies.
    unsigned DataLatency = SU->Latency;
    for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
      SUnit *UseSU = UseList[i];
      if (UseSU == SU)
        continue;
      unsigned LDataLatency = DataLatency;
      // Optionally add in a special extra latency for nodes that
      // feed addresses.
      // TODO: Do this for register aliases too.
      // TODO: Perhaps we should get rid of
      //       SpecialAddressLatency and just move this into
      //       adjustSchedDependency for the targets that care about it.
      if (SpecialAddressLatency != 0 && !UnitLatencies &&
          UseSU != &ExitSU) {
        MachineInstr *UseMI = UseSU->getInstr();
        const MCInstrDesc &UseMCID = UseMI->getDesc();
        int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
        assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
        if (RegUseIndex >= 0 &&
            (UseMI->mayLoad() || UseMI->mayStore()) &&
            (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
            UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
          LDataLatency += SpecialAddressLatency;
      }
      // Adjust the dependence latency using operand def/use
      // information (if any), and then allow the target to
      // perform its own adjustments.
      const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
      if (!UnitLatencies) {
        ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
        ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
      }
      UseSU->addPred(dep);
    }
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      std::vector<SUnit *> &UseList = Uses[*Alias];
      for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
        SUnit *UseSU = UseList[i];
        if (UseSU == SU)
          continue;
        const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
        if (!UnitLatencies) {
          ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
          ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
        }
        UseSU->addPred(dep);
      }
    }

    // If a def is going to wrap back around to the top of the loop,
    // backschedule it.
    if (!UnitLatencies && DefList.empty()) {
      LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
      if (I != LoopRegs.Deps.end()) {
        const MachineOperand *UseMO = I->second.first;
        unsigned Count = I->second.second;
        const MachineInstr *UseMI = UseMO->getParent();
        unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
        const MCInstrDesc &UseMCID = UseMI->getDesc();
        // TODO: If we knew the total depth of the region here, we could
        // handle the case where the whole loop is inside the region but
        // is large enough that the isScheduleHigh trick isn't needed.
        if (UseMOIdx < UseMCID.getNumOperands()) {
          // Currently, we only support scheduling regions consisting of
          // single basic blocks. Check to see if the instruction is in
          // the same region by checking to see if it has the same parent.
          if (UseMI->getParent() != MI->getParent()) {
            unsigned Latency = SU->Latency;
            if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
              Latency += SpecialAddressLatency;
            // This is a wild guess as to the portion of the latency which
            // will be overlapped by work done outside the current
            // scheduling region.
            Latency -= std::min(Latency, Count);
            // Add the artificial edge.
            ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                /*Reg=*/0, /*isNormalMemory=*/false,
                                /*isMustAlias=*/false,
                                /*isArtificial=*/true));
          } else if (SpecialAddressLatency > 0 &&
                     UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
            // The entire loop body is within the current scheduling region
            // and the latency of this operation is assumed to be greater
            // than the latency of the loop.
            // TODO: Recursively mark data-edge predecessors as
            //       isScheduleHigh too.
            SU->isScheduleHigh = true;
          }
        }
        LoopRegs.Deps.erase(I);
      }
    }

    UseList.clear();
    if (!MO.isDead())
      DefList.clear();

    // Calls will not be reordered because of chain dependencies (see
    // below). Since call operands are dead, calls may continue to be added
    // to the DefList making dependence checking quadratic in the size of
    // the block. Instead, we leave only one call at the back of the
    // DefList.
    if (SU->isCall) {
      while (!DefList.empty() && DefList.back()->isCall)
        DefList.pop_back();
    }
    DefList.push_back(SU);
  } else {
    UseList.push_back(SU);
  }
}

/// addVRegDefDeps - Add register output and data dependencies from this SUnit
/// to instructions that occur later in the same scheduling region if they read
/// from or write to the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();

  // Add output dependence to the next nearest def of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  SUnit *DefSU = VRegDefs[Reg];
  if (DefSU && DefSU != SU && DefSU != &ExitSU) {
    unsigned OutLatency = TII->getOutputLatency(InstrItins, MI, OperIdx,
                                                DefSU->getInstr());
    DefSU->addPred(SDep(SU, SDep::Output, OutLatency, Reg));
  }
  VRegDefs[Reg] = SU;

  // Add data dependence to any uses of this vreg before the next nearest def.
  //
  // TODO: Handle ExitSU properly.
  //
  // TODO: Data dependence could be handled more efficiently at the use-side.
  std::vector<SUnit*> &UseList = VRegUses[Reg];
  for (std::vector<SUnit*>::const_iterator UI = UseList.begin(),
         UE = UseList.end(); UI != UE; ++UI) {
    SUnit *UseSU = *UI;
    if (UseSU == SU) continue;

    // TODO: Handle "special" address latencies cleanly.
    const SDep& dep = SDep(SU, SDep::Data, SU->Latency, Reg);
    if (!UnitLatencies) {
      // Adjust the dependence latency using operand def/use information, then
      // allow the target to perform its own adjustments.
      ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
      ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
    }
    UseSU->addPred(dep);
  }
  UseList.clear();
}

/// addVRegUseDeps - Add register antidependencies from this SUnit to
/// instructions that occur later in the same scheduling region if they
/// write the virtual register referenced at OperIdx.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  unsigned Reg = SU->getInstr()->getOperand(OperIdx).getReg();

  // Add antidependence to the following def of the vreg it uses.
  SUnit *DefSU = VRegDefs[Reg];
  if (DefSU && DefSU != SU)
    DefSU->addPred(SDep(SU, SDep::Anti, 0, Reg));

  // Add this SUnit to the use list of the vreg it uses.
  //
  // TODO: pinch the DAG before we see too many uses to avoid quadratic
  // behavior. Limiting the scheduling window can accomplish the same thing.
  VRegUses[Reg].push_back(SU);
}

void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = NULL;

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  AddSchedBarrierDeps();

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    assert(Defs[i].empty() && "Only BuildGraph should push/pop Defs");
  }

  // Reinitialize the large VReg vectors, while reusing the memory.
  //
  // Note: this can be an expensive part of DAG building. We may want to be
  // more clever. Reevaluate after VRegUses goes away.
  assert(VRegDefs.size() == 0 && VRegUses.size() == 0 &&
         "Only BuildSchedGraph should access VRegDefs/Uses");
  VRegDefs.resize(MF.getRegInfo().getNumVirtRegs());
  VRegUses.resize(MF.getRegInfo().getNumVirtRegs());

  // Walk the list of instructions, from bottom moving up.
  MachineInstr *PrevMI = NULL;
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    if (MI && PrevMI) {
      DbgValues.push_back(std::make_pair(PrevMI, MI));
      PrevMI = NULL;
    }

    if (MI->isDebugValue()) {
      PrevMI = MI;
      continue;
    }

    assert(!MI->isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);
    SU->isCall = MI->isCall();
    SU->isCommutable = MI->isCommutable();

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        addPhysRegDeps(SU, j);
      else {
        assert(!IsPostRA && "Virtual register encountered!");
        if (MO.isDef())
          addVRegDefDeps(SU, j);
        else
          addVRegUseDeps(SU, j);
      }
    }

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
    if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!MI->mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
             E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (MI->mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between cmp
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Order, 0,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false,
                            /*isArtificial=*/true));
    } else if (MI->mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V =
            getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise dependencies.
          std::map<const Value *, SUnit *>::iterator I =
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE =
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

          PendingLoads.push_back(SU);
          MayAlias = true;
        }

        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
    }
  }
  if (PrevMI)
    FirstDbgValue = PrevMI;

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  VRegDefs.clear();
  VRegUses.clear();
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  // Compute the latency for the node.
  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;

    // Simplistic target-independent heuristic: assume that loads take
    // extra time.
    if (SU->getInstr()->mayLoad())
      SU->Latency += 2;
  } else {
    SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
  }
}

void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  if (!InstrItins || InstrItins->isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    const MachineOperand &MO = DefMI->getOperand(DefIdx);
    if (MO.isReg() && MO.isImplicit() &&
        DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
      // This is an implicit def, getOperandLatency() won't return the correct
      // latency. e.g.
      //   %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
      //   %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
      // What we want is to compute latency between def of %D6/%D7 and use of
      // %Q3 instead.
      DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
    }
    MachineInstr *UseMI = Use->getInstr();
    // For all uses of the register, calculate the maximum latency.
    int Latency = -1;
    if (UseMI) {
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
                                              UseMI, i);
        Latency = std::max(Latency, UseCycle);
      }
    } else {
      // If UseMI is null, then it must be a scheduling barrier.
      if (!InstrItins || InstrItins->isEmpty())
        return;
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
    }

    // If we found a latency, then replace the existing dependence latency.
    if (Latency >= 0)
      dep.setLatency(Latency);
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  Begin = InsertPos;

  // If the first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(InsertPos, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(InsertPos, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      EmitNoop();

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      Begin = prior(InsertPos);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
  return BB;
}