//===- llvm/Target/TargetSchedule.cpp - Sched Machine Model ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a wrapper around MCSchedModel that allows the interface
// to benefit from information currently only available in TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>

using namespace llvm;

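// Hidden developer flags that allow each latency source to be disabled
// independently, e.g. to compare scheduling behavior with and without the
// per-operand machine model.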
static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
  cl::desc("Use TargetSchedModel for latency lookup"));

static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
  cl::desc("Use InstrItineraryData for latency lookup"));

bool TargetSchedModel::hasInstrSchedModel() const {
  return EnableSchedModel && SchedModel.hasInstrSchedModel();
}

bool TargetSchedModel::hasInstrItineraries() const {
  return EnableSchedItins && !InstrItins.isEmpty();
}

static unsigned gcd(unsigned Dividend, unsigned Divisor) {
  // Dividend and Divisor will be naturally swapped as needed.
  while (Divisor) {
    unsigned Rem = Dividend % Divisor;
    Dividend = Divisor;
    Divisor = Rem;
  }
  return Dividend;
}

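/// Compute the least common multiple, asserting that it does not overflow
/// unsigned arithmetic.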
static unsigned lcm(unsigned A, unsigned B) {
  unsigned LCM = (uint64_t(A) * B) / gcd(A, B);
  assert((LCM >= A && LCM >= B) && "LCM overflow");
  return LCM;
}

void TargetSchedModel::init(const TargetSubtargetInfo *TSInfo) {
  STI = TSInfo;
  SchedModel = TSInfo->getSchedModel();
  TII = TSInfo->getInstrInfo();
  STI->initInstrItins(InstrItins);

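  // Normalize all resource counts to a common multiple (ResourceLCM) so that
  // per-resource usage can be compared with integer arithmetic: MicroOpFactor
  // scales issue width, and ResourceFactors[Idx] scales each resource kind.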
  unsigned NumRes = SchedModel.getNumProcResourceKinds();
  ResourceFactors.resize(NumRes);
  ResourceLCM = SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    if (NumUnits > 0)
      ResourceLCM = lcm(ResourceLCM, NumUnits);
  }
  MicroOpFactor = ResourceLCM / SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0;
  }
}

/// Returns true only if the instruction is specified as single issue.
bool TargetSchedModel::mustBeginGroup(const MachineInstr *MI,
                                      const MCSchedClassDesc *SC) const {
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->BeginGroup;
  }
  return false;
}

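/// Returns true only if the instruction is specified as ending an issue
/// group.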
bool TargetSchedModel::mustEndGroup(const MachineInstr *MI,
                                    const MCSchedClassDesc *SC) const {
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->EndGroup;
  }
  return false;
}

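/// Return the number of micro-ops for this instruction, preferring itinerary
/// data over the per-operand machine model. Transient instructions that
/// generate no real machine code consume no issue slots.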
unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
                                          const MCSchedClassDesc *SC) const {
  if (hasInstrItineraries()) {
    int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
    return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, *MI);
  }
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->NumMicroOps;
  }
  return MI->isTransient() ? 0 : 1;
}

// The machine model may explicitly specify an invalid latency, which
// effectively means infinite latency. Since users of the TargetSchedule API
// don't know how to handle this, we convert it to a very large latency that is
// easy to distinguish when debugging the DAG but won't induce overflow.
static unsigned capLatency(int Cycles) {
  return Cycles >= 0 ? Cycles : 1000;
}

/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *TargetSchedModel::
resolveSchedClass(const MachineInstr *MI) const {
  // Get the definition's scheduling class descriptor from this machine model.
  unsigned SchedClass = MI->getDesc().getSchedClass();
  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  if (!SCDesc->isValid())
    return SCDesc;

#ifndef NDEBUG
  unsigned NIter = 0;
#endif
  while (SCDesc->isVariant()) {
    assert(++NIter < 6 && "Variants are nested deeper than the magic number");

    SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
    SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  }
  return SCDesc;
}

/// Find the def index of this operand. This index maps to the machine model
/// and is independent of use operands. Def operands may be reordered with
/// uses or merged with uses without affecting the def index (e.g. before/after
/// regalloc). However, an instruction's def operands must never be reordered
/// with respect to each other.
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
  unsigned DefIdx = 0;
  for (unsigned i = 0; i != DefOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef())
      ++DefIdx;
  }
  return DefIdx;
}

/// Find the use index of this operand. This is independent of the
/// instruction's def operands.
///
/// Note that uses are not determined by the operand's isUse property, which
/// is simply the inverse of isDef. Here we consider any readsReg operand to be
/// a "use". The machine model allows an operand to be both a Def and Use.
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
  unsigned UseIdx = 0;
  for (unsigned i = 0; i != UseOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.readsReg() && !MO.isDef())
      ++UseIdx;
  }
  return UseIdx;
}

// Top-level API for clients that know the operand indices.
unsigned TargetSchedModel::computeOperandLatency(
  const MachineInstr *DefMI, unsigned DefOperIdx,
  const MachineInstr *UseMI, unsigned UseOperIdx) const {

  if (!hasInstrSchedModel() && !hasInstrItineraries())
    return TII->defaultDefLatency(SchedModel, *DefMI);

  if (hasInstrItineraries()) {
    int OperLatency = 0;
    if (UseMI) {
      OperLatency = TII->getOperandLatency(&InstrItins, *DefMI, DefOperIdx,
                                           *UseMI, UseOperIdx);
    } else {
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
    }
    if (OperLatency >= 0)
      return OperLatency;

    // No operand latency was found.
    unsigned InstrLatency = TII->getInstrLatency(&InstrItins, *DefMI);

    // Expected latency is the max of the stage latency and itinerary props.
    // Rather than directly querying InstrItins stage latency, we call a TII
    // hook to allow subtargets to specialize latency. This hook is only
    // applicable to the InstrItins model. InstrSchedModel should model all
    // special cases without TII hooks.
    InstrLatency =
        std::max(InstrLatency, TII->defaultDefLatency(SchedModel, *DefMI));
    return InstrLatency;
  }
  // hasInstrSchedModel()
  const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
  unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
  if (DefIdx < SCDesc->NumWriteLatencyEntries) {
    // Look up the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
      STI->getWriteLatencyEntry(SCDesc, DefIdx);
    unsigned WriteID = WLEntry->WriteResourceID;
    unsigned Latency = capLatency(WLEntry->Cycles);
    if (!UseMI)
      return Latency;

    // Look up the use's latency adjustment in SubtargetInfo.
    const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
    if (UseDesc->NumReadAdvanceEntries == 0)
      return Latency;
    unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
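    // A positive ReadAdvance means the use reads the operand that many cycles
    // early, reducing the effective latency; a negative value increases it.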
    int Advance = STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
    if (Advance > 0 && (unsigned)Advance > Latency) // unsigned wrap
      return 0;
    return Latency - Advance;
  }
  // If DefIdx does not exist in the model (e.g. implicit defs), then return
  // unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
  if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
      && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()
      && SchedModel.isComplete()) {
    errs() << "DefIdx " << DefIdx << " exceeds machine model writes for "
           << *DefMI << " (Try with MCSchedModel.CompleteModel set to false)";
    llvm_unreachable("incomplete machine model");
  }
#endif
  // FIXME: Automatically giving all implicit defs defaultDefLatency is
  // undesirable. We should only do it for defs that are known to the MC
  // desc like flags. Truly implicit defs should get 1 cycle latency.
  return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, *DefMI);
}

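/// Compute the latency of an already-resolved scheduling class, capping any
/// invalid (negative) latency at a large finite value.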
unsigned
TargetSchedModel::computeInstrLatency(const MCSchedClassDesc &SCDesc) const {
  return capLatency(MCSchedModel::computeInstrLatency(*STI, SCDesc));
}

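/// Compute latency from the opcode alone; only valid when the subtarget
/// provides an instruction scheduling model.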
unsigned TargetSchedModel::computeInstrLatency(unsigned Opcode) const {
  assert(hasInstrSchedModel() && "Only call this function with a SchedModel");
  unsigned SCIdx = TII->get(Opcode).getSchedClass();
  return capLatency(SchedModel.computeInstrLatency(*STI, SCIdx));
}

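/// Compute latency for a bare MCInst, letting the MC layer resolve variant
/// scheduling classes when an instruction scheduling model is present.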
unsigned TargetSchedModel::computeInstrLatency(const MCInst &Inst) const {
  if (hasInstrSchedModel())
    return capLatency(SchedModel.computeInstrLatency(*STI, *TII, Inst));
  return computeInstrLatency(Inst.getOpcode());
}

unsigned
TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
                                      bool UseDefaultDefLatency) const {
  // For the itinerary model, fall back to the old subtarget hook.
  // Allow subtargets to compute Bundle latencies outside the machine model.
  if (hasInstrItineraries() || MI->isBundle() ||
      (!hasInstrSchedModel() && !UseDefaultDefLatency))
    return TII->getInstrLatency(&InstrItins, *MI);

  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
    if (SCDesc->isValid())
      return computeInstrLatency(*SCDesc);
  }
  return TII->defaultDefLatency(SchedModel, *MI);
}

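/// Compute the latency of an output (WAW) dependency between the def of
/// DefMI's operand at DefOperIdx and the later write of the same register in
/// DepMI.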
unsigned TargetSchedModel::
computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                     const MachineInstr *DepMI) const {
  if (!SchedModel.isOutOfOrder())
    return 1;

  // An out-of-order processor can dispatch WAW dependencies in the same cycle.

  // Treat predication as a data dependency for out-of-order cpus. In-order
  // cpus do not need to treat predicated writes specially.
  //
  // TODO: The following hack exists because predication passes do not
  // correctly append imp-use operands, and readsReg() strangely returns false
  // for predicated defs.
  Register Reg = DefMI->getOperand(DefOperIdx).getReg();
  const MachineFunction &MF = *DefMI->getMF();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(*DepMI))
    return computeInstrLatency(DefMI);

  // If we have a per-operand scheduling model, check if this def is writing
  // an unbuffered resource. If so, it is treated like an in-order cpu.
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
    if (SCDesc->isValid()) {
      for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
             *PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
        if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->BufferSize)
          return 1;
      }
    }
  }
  return 0;
}

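/// Return the reciprocal throughput (average cycles between issues) for MI,
/// or 0.0 when neither itineraries nor a scheduling model provide data.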
double
TargetSchedModel::computeReciprocalThroughput(const MachineInstr *MI) const {
  if (hasInstrItineraries()) {
    unsigned SchedClass = MI->getDesc().getSchedClass();
    return MCSchedModel::getReciprocalThroughput(SchedClass,
                                                 *getInstrItineraries());
  }

  if (hasInstrSchedModel())
    return MCSchedModel::getReciprocalThroughput(*STI, *resolveSchedClass(MI));

  return 0.0;
}

double
TargetSchedModel::computeReciprocalThroughput(unsigned Opcode) const {
  unsigned SchedClass = TII->get(Opcode).getSchedClass();
  if (hasInstrItineraries())
    return MCSchedModel::getReciprocalThroughput(SchedClass,
                                                 *getInstrItineraries());
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc &SCDesc = *SchedModel.getSchedClassDesc(SchedClass);
    if (SCDesc.isValid() && !SCDesc.isVariant())
      return MCSchedModel::getReciprocalThroughput(*STI, SCDesc);
  }

  return 0.0;
}

double
TargetSchedModel::computeReciprocalThroughput(const MCInst &MI) const {
  if (hasInstrSchedModel())
    return SchedModel.getReciprocalThroughput(*STI, *TII, MI);
  return computeReciprocalThroughput(MI.getOpcode());
}