//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/ADT/IntEqClasses.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "misched"

static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
    cl::ZeroOrMore, cl::init(false),
    cl::desc("Enable use of AA during MI DAG construction"));

static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden,
    cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"));

// Note: the two options below might be used in tuning compile time vs
// output quality. Setting HugeRegion so large that it will never be
// reached means best-effort, but may be slow.

// When Stores and Loads maps (or NonAliasStores and NonAliasLoads)
// together hold this many SUs, a reduction of maps will be done.
static cl::opt<unsigned> HugeRegion("dag-maps-huge-region", cl::Hidden,
    cl::init(1000), cl::desc("The limit to use while constructing the DAG "
                             "prior to scheduling, at which point a trade-off "
                             "is made to avoid excessive compile time."));

static cl::opt<unsigned> ReductionSize(
    "dag-maps-reduction-size", cl::Hidden,
    cl::desc("A huge scheduling region will have maps reduced by this many "
             "nodes at a time. Defaults to HugeRegion / 2."));
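// For example, with the default -dag-maps-huge-region of 1000 and no
// explicit -dag-maps-reduction-size, each call to reduceHugeMemNodeMaps()
// removes 500 nodes from the maps.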

static unsigned getReductionSize() {
  // Always reduce a huge region with half of the elements, except
  // when the user sets this number explicitly.
  if (ReductionSize.getNumOccurrences() == 0)
    return HugeRegion / 2;
  return ReductionSize;
}

static void dumpSUList(ScheduleDAGInstrs::SUList &L) {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << "{ ";
  for (auto *su : L) {
    dbgs() << "SU(" << su->NodeNum << ")";
    if (su != L.back())
      dbgs() << ", ";
  }
  dbgs() << "}\n";
#endif
}

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo *mli,
                                     bool RemoveKillFlags)
    : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()),
      RemoveKillFlags(RemoveKillFlags), CanHandleTerminators(false),
      TrackLaneMasks(false), AAForDep(nullptr), BarrierChain(nullptr),
      UnknownValue(UndefValue::get(
                     Type::getVoidTy(mf.getFunction()->getContext()))),
      FirstDbgValue(nullptr) {
  DbgValues.clear();

  const TargetSubtargetInfo &ST = mf.getSubtarget();
  SchedModel.init(ST.getSchedModel(), &ST, TII);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
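/// For example, in a sequence like (illustrative IR, not from this file)
///   %i = ptrtoint i8* %obj to i64
///   %j = add i64 %i, 16
///   %p = inttoptr i64 %j to i8*
/// the walk steps from %j through the add back to the ptrtoint operand and
/// returns %obj.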
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant, a multiplied value, or a phi, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
           !isa<PHINode>(U->getOperand(1))))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (true);
}

/// getUnderlyingObjects - This is a wrapper around GetUnderlyingObjects
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static void getUnderlyingObjects(const Value *V,
                                 SmallVectorImpl<Value *> &Objects,
                                 const DataLayout &DL) {
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 4> Working(1, V);
  do {
    V = Working.pop_back_val();

    SmallVector<Value *, 4> Objs;
    GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);

    for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
         I != IE; ++I) {
      V = *I;
      if (!Visited.insert(V).second)
        continue;
      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
        const Value *O =
          getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
        if (O->getType()->isPointerTy()) {
          Working.push_back(O);
          continue;
        }
      }
      Objects.push_back(const_cast<Value *>(V));
    }
  } while (!Working.empty());
}

/// getUnderlyingObjectsForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object.
static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
                                         const MachineFrameInfo *MFI,
                                         UnderlyingObjectsVector &Objects,
                                         const DataLayout &DL) {
  auto allMMOsOkay = [&]() {
    for (const MachineMemOperand *MMO : MI->memoperands()) {
      if (MMO->isVolatile())
        return false;

      if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
        // Functions that contain tail calls don't have unique
        // PseudoSourceValue objects. Two PseudoSourceValues might refer to
        // the same or overlapping locations. The client code calling this
        // function assumes this is not the case. So return a conservative
        // answer of no known object.
        if (MFI->hasTailCall())
          return false;

        // For now, ignore PseudoSourceValues which may alias LLVM IR values
        // because the code that uses this function has no way to cope with
        // such aliases.
        if (PSV->isAliased(MFI))
          return false;

        bool MayAlias = PSV->mayAlias(MFI);
        Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias));
      } else if (const Value *V = MMO->getValue()) {
        SmallVector<Value *, 4> Objs;
        getUnderlyingObjects(V, Objs, DL);

        for (Value *V : Objs) {
          if (!isIdentifiedObject(V))
            return false;

          Objects.push_back(UnderlyingObjectsVector::value_type(V, true));
        }
      } else
        return false;
    }
    return true;
  };

  if (!allMMOsOkay())
    Objects.clear();
}

void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
  BB = bb;
}

void ScheduleDAGInstrs::finishBlock() {
  // Subclasses should no longer refer to the old block.
  BB = nullptr;
}

/// Initialize the DAG and common scheduler state for the current scheduling
/// region. This does not actually create the DAG, only clears it. The
/// scheduling driver may call BuildSchedGraph multiple times per scheduling
/// region.
void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs) {
  assert(bb == BB && "startBlock should set BB");
  RegionBegin = begin;
  RegionEnd = end;
  NumRegionInstrs = regioninstrs;
}

/// Close the current scheduling region. Don't clear any state in case the
/// driver wants to refer to the previous scheduling region.
void ScheduleDAGInstrs::exitRegion() {
  // Nothing to do.
}

/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::addSchedBarrierDeps() {
  MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : nullptr;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->isCall() || ExitMI->isBarrier());
  if (AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      else if (MO.readsReg()) // ignore undef operands
        addVRegUseDeps(&ExitSU, i);
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    assert(Uses.empty() && "Uses in set before adding deps?");
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (const auto &LI : (*SI)->liveins()) {
        if (!Uses.contains(LI.PhysReg))
          Uses.insert(PhysRegSUOper(&ExitSU, -1, LI.PhysReg));
      }
  }
}

/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = MF.getSubtarget();

  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Uses.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
      SUnit *UseSU = I->SU;
      if (UseSU == SU)
        continue;

      // Adjust the dependence latency using operand def/use information,
      // then allow the target to perform its own adjustments.
      int UseOp = I->OpIdx;
      MachineInstr *RegUse = nullptr;
      SDep Dep;
      if (UseOp < 0)
        Dep = SDep(SU, SDep::Artificial);
      else {
        // Set the hasPhysRegDefs only for physreg defs that have a use within
        // the scheduling region.
        SU->hasPhysRegDefs = true;
        Dep = SDep(SU, SDep::Data, *Alias);
        RegUse = UseSU->getInstr();
      }
      Dep.setLatency(
        SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, RegUse,
                                         UseOp));

      ST.adjustSchedDependency(SU, UseSU, Dep);
      UseSU->addPred(Dep);
    }
  }
}

/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
/// this SUnit to following instructions in the same scheduling region that
/// depend on the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Defs.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
      SUnit *DefSU = I->SU;
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias));
        else {
          SDep Dep(SU, Kind, /*Reg=*/*Alias);
          Dep.setLatency(
            SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
          DefSU->addPred(Dep);
        }
      }
    }
  }

  if (!MO.isDef()) {
    SU->hasPhysRegUses = true;
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    Uses.insert(PhysRegSUOper(SU, OperIdx, MO.getReg()));
    if (RemoveKillFlags)
      MO.setIsKill(false);
  } else {
    addPhysRegDataDeps(SU, OperIdx);
    unsigned Reg = MO.getReg();

    // Clear this register's use list.
    if (Uses.contains(Reg))
      Uses.eraseAll(Reg);

    if (!MO.isDead()) {
      Defs.eraseAll(Reg);
    } else if (SU->isCall) {
      // Calls will not be reordered because of chain dependencies (see
      // below). Since call operands are dead, calls may continue to be added
      // to the DefList making dependence checking quadratic in the size of
      // the block. Instead, we leave only one call at the back of the
      // DefList.
      Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg);
      Reg2SUnitsMap::iterator B = P.first;
      Reg2SUnitsMap::iterator I = P.second;
      for (bool isBegin = I == B; !isBegin; /* empty */) {
        isBegin = (--I) == B;
        if (!I->SU->isCall)
          break;
        I = Defs.erase(I);
      }
    }

    // Defs are pushed in the order they are visited and never reordered.
    Defs.insert(PhysRegSUOper(SU, OperIdx, Reg));
  }
}

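/// Return the lane mask covered by MO. If the register's class has no
/// interesting (disjunct) subregisters, all lanes (~0u) are returned, so
/// lane tracking effectively degenerates to whole-register tracking.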
LaneBitmask
ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const {
  unsigned Reg = MO.getReg();
  // No point in tracking lanemasks if we don't have interesting subregisters.
  const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
  if (!RC.HasDisjunctSubRegs)
    return ~0u;

  unsigned SubReg = MO.getSubReg();
  if (SubReg == 0)
    return RC.getLaneMask();
  return TRI->getSubRegIndexLaneMask(SubReg);
}

/// addVRegDefDeps - Add register output and data dependencies from this SUnit
/// to instructions that occur later in the same scheduling region if they read
/// from or write to the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);
  unsigned Reg = MO.getReg();

  LaneBitmask DefLaneMask;
  LaneBitmask KillLaneMask;
  if (TrackLaneMasks) {
    bool IsKill = MO.getSubReg() == 0 || MO.isUndef();
    DefLaneMask = getLaneMaskForMO(MO);
    // If we have a <read-undef> flag, none of the lane values comes from an
    // earlier instruction.
    KillLaneMask = IsKill ? ~0u : DefLaneMask;

    // Clear the undef flag; we'll re-add it later once we know which
    // subregister def is first.
    MO.setIsUndef(false);
  } else {
    DefLaneMask = ~0u;
    KillLaneMask = ~0u;
  }

  if (MO.isDead()) {
    assert(CurrentVRegUses.find(Reg) == CurrentVRegUses.end() &&
           "Dead defs should have no uses");
  } else {
    // Add data dependence to all uses we found so far.
    const TargetSubtargetInfo &ST = MF.getSubtarget();
    for (VReg2SUnitOperIdxMultiMap::iterator I = CurrentVRegUses.find(Reg),
         E = CurrentVRegUses.end(); I != E; /*empty*/) {
      LaneBitmask LaneMask = I->LaneMask;
      // Ignore uses of other lanes.
      if ((LaneMask & KillLaneMask) == 0) {
        ++I;
        continue;
      }

      if ((LaneMask & DefLaneMask) != 0) {
        SUnit *UseSU = I->SU;
        MachineInstr *Use = UseSU->getInstr();
        SDep Dep(SU, SDep::Data, Reg);
        Dep.setLatency(SchedModel.computeOperandLatency(MI, OperIdx, Use,
                                                        I->OperandIndex));
        ST.adjustSchedDependency(SU, UseSU, Dep);
        UseSU->addPred(Dep);
      }

      LaneMask &= ~KillLaneMask;
      // If we found a def for all lanes of this use, remove it from the list.
      if (LaneMask != 0) {
        I->LaneMask = LaneMask;
        ++I;
      } else
        I = CurrentVRegUses.erase(I);
    }
  }

  // Shortcut: Singly defined vregs do not have output/anti dependencies.
  if (MRI.hasOneDef(Reg))
    return;

  // Add output dependence to the next nearest defs of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  LaneBitmask LaneMask = DefLaneMask;
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for other lanes.
    if ((V2SU.LaneMask & LaneMask) == 0)
      continue;
    // Add an output dependence.
    SUnit *DefSU = V2SU.SU;
    // Ignore additional defs of the same lanes in one instruction. This can
    // happen because lanemasks are shared for targets with too many
    // subregisters. We also use some representation tricks/hacks where we
    // add super-register defs/uses, to imply that although we only access
    // parts of the reg we care about the full one.
    if (DefSU == SU)
      continue;
    SDep Dep(SU, SDep::Output, Reg);
    Dep.setLatency(
      SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
    DefSU->addPred(Dep);

    // Update current definition. This can get tricky if the def was about a
    // bigger lanemask before. We then have to shrink it and create a new
    // VReg2SUnit for the non-overlapping part.
    LaneBitmask OverlapMask = V2SU.LaneMask & LaneMask;
    LaneBitmask NonOverlapMask = V2SU.LaneMask & ~LaneMask;
    if (NonOverlapMask != 0)
      CurrentVRegDefs.insert(VReg2SUnit(Reg, NonOverlapMask, V2SU.SU));
    V2SU.SU = SU;
    V2SU.LaneMask = OverlapMask;
  }
  // If there was no CurrentVRegDefs entry for some lanes yet, create one.
  if (LaneMask != 0)
    CurrentVRegDefs.insert(VReg2SUnit(Reg, LaneMask, SU));
}

/// addVRegUseDeps - Add a register data dependency if the instruction that
/// defines the virtual register used at OperIdx is mapped to an SUnit. Add a
/// register antidependency from this SUnit to instructions that occur later in
/// the same scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  const MachineOperand &MO = MI->getOperand(OperIdx);
  unsigned Reg = MO.getReg();

  // Remember the use. Data dependencies will be added when we find the def.
  LaneBitmask LaneMask = TrackLaneMasks ? getLaneMaskForMO(MO) : ~0u;
  CurrentVRegUses.insert(VReg2SUnitOperIdx(Reg, LaneMask, OperIdx, SU));

  // Add antidependences to the following defs of the vreg.
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for unrelated lanes.
    LaneBitmask PrevDefLaneMask = V2SU.LaneMask;
    if ((PrevDefLaneMask & LaneMask) == 0)
      continue;
    if (V2SU.SU == SU)
      continue;

    V2SU.SU->addPred(SDep(SU, SDep::Anti, Reg));
  }
}

/// Return true if MI is an instruction we are unable to reason about
/// (like a call or something with unmodeled side effects).
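/// In practice this covers calls, instructions with unmodeled side effects,
/// and ordered (e.g. volatile or atomic) memory references that are not
/// invariant loads, mirroring the checks in the body below.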
static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) {
  return MI->isCall() || MI->hasUnmodeledSideEffects() ||
         (MI->hasOrderedMemoryRef() && !MI->isInvariantLoad(AA));
}

/// This returns true if the two MIs need a chain edge between them.
/// This is called on normal stores and loads.
static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                             const DataLayout &DL, MachineInstr *MIa,
                             MachineInstr *MIb) {
  const MachineFunction *MF = MIa->getParent()->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  assert((MIa->mayStore() || MIb->mayStore()) &&
         "Dependency checked between two loads");

  // Let the target decide if memory accesses cannot possibly overlap.
  if (TII->areMemAccessesTriviallyDisjoint(MIa, MIb, AA))
    return false;

  // To this point analysis is generic. From here on we do need AA.
  if (!AA)
    return true;

  // FIXME: Need to handle multiple memory operands to support all targets.
  if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
    return true;

  MachineMemOperand *MMOa = *MIa->memoperands_begin();
  MachineMemOperand *MMOb = *MIb->memoperands_begin();

  if (!MMOa->getValue() || !MMOb->getValue())
    return true;

  // The following interface to AA is fashioned after DAGCombiner::isAlias
  // and operates with MachineMemOperand offset with some important
  // assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap
  //     checking.
  //   - These offsets never wrap and never step outside
  //     of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // FIXME: Even before we go to AA we can reason locally about some
  // memory objects. It can save compile time, and possibly catch some
  // corner cases not currently covered.

  assert((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset");
  assert((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset");

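  // AA sees only the base IR values, not the MachineMemOperand offsets, so
  // conservatively extend each access's size to span from the smaller of
  // the two offsets up to that access's own end; any real overlap between
  // the two accesses then remains visible to the alias query below.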
  int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset());
  int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset;
  int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;

  AliasResult AAResult =
      AA->alias(MemoryLocation(MMOa->getValue(), Overlapa,
                               UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
                MemoryLocation(MMOb->getValue(), Overlapb,
                               UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));

  return (AAResult != NoAlias);
}

/// Check whether two objects need a chain edge and add it if needed.
void ScheduleDAGInstrs::addChainDependency(SUnit *SUa, SUnit *SUb,
                                           unsigned Latency) {
  if (MIsNeedChainEdge(AAForDep, MFI, MF.getDataLayout(), SUa->getInstr(),
                       SUb->getInstr())) {
    SDep Dep(SUa, SDep::MayAliasMem);
    Dep.setLatency(Latency);
    SUb->addPred(Dep);
  }
}

/// Create an SUnit for each real instruction, numbered in top-down topological
/// order. The instruction order A < B implies that no edge exists from B to A.
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
/// instead of pointers.
///
/// MachineScheduler relies on initSUnits numbering the nodes by their order in
/// the original instruction list.
void ScheduleDAGInstrs::initSUnits() {
  // We'll be allocating one SUnit for each real instruction in the region,
  // which is contained within a basic block.
  SUnits.reserve(NumRegionInstrs);

  for (MachineBasicBlock::iterator I = RegionBegin; I != RegionEnd; ++I) {
    MachineInstr *MI = I;
    if (MI->isDebugValue())
      continue;

    SUnit *SU = newSUnit(MI);
    MISUnitMap[MI] = SU;

    SU->isCall = MI->isCall();
    SU->isCommutable = MI->isCommutable();

    // Assign the Latency field of SU using target-provided information.
    SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());

    // If this SUnit uses a reserved or unbuffered resource, mark it as such.
    //
    // Reserved resources block an instruction from issuing and stall the
    // entire pipeline. These are identified by BufferSize=0.
    //
    // Unbuffered resources prevent execution of subsequent instructions that
    // require the same resources. This is used for in-order execution pipelines
    // within an out-of-order core. These are identified by BufferSize=1.
    if (SchedModel.hasInstrSchedModel()) {
      const MCSchedClassDesc *SC = getSchedClass(SU);
      for (TargetSchedModel::ProcResIter
             PI = SchedModel.getWriteProcResBegin(SC),
             PE = SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) {
        switch (SchedModel.getProcResource(PI->ProcResourceIdx)->BufferSize) {
        case 0:
          SU->hasReservedResource = true;
          break;
        case 1:
          SU->isUnbuffered = true;
          break;
        default:
          break;
        }
      }
    }
  }
}

void ScheduleDAGInstrs::collectVRegUses(SUnit *SU) {
  const MachineInstr *MI = SU->getInstr();
  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI->operands()) {
        if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, 0, SU));
  }
}

class ScheduleDAGInstrs::Value2SUsMap : public MapVector<ValueType, SUList> {
  /// Current total number of SUs in map.
  unsigned NumNodes;

  /// 1 for loads, 0 for stores. (see comment in SUList)
  unsigned TrueMemOrderLatency;

public:
  Value2SUsMap(unsigned lat = 0) : NumNodes(0), TrueMemOrderLatency(lat) {}

  /// To keep NumNodes up to date, insert() is used instead of
  /// this operator w/ push_back().
  ValueType &operator[](const SUList &Key) {
    llvm_unreachable("Don't use. Use insert() instead.");
  }

  /// Add SU to the SUList of V. If Map grows huge, reduce its size
  /// by calling reduce().
  void inline insert(SUnit *SU, ValueType V) {
    MapVector::operator[](V).push_back(SU);
    NumNodes++;
  }

  /// Clears the list of SUs mapped to V.
  void inline clearList(ValueType V) {
    iterator Itr = find(V);
    if (Itr != end()) {
      assert(NumNodes >= Itr->second.size());
      NumNodes -= Itr->second.size();

      Itr->second.clear();
    }
  }

  /// Clears map from all contents.
  void clear() {
    MapVector<ValueType, SUList>::clear();
    NumNodes = 0;
  }

  unsigned inline size() const { return NumNodes; }

  /// Count the number of SUs in this map after a reduction.
  void reComputeSize() {
    NumNodes = 0;
    for (auto &I : *this)
      NumNodes += I.second.size();
  }

  unsigned inline getTrueMemOrderLatency() const {
    return TrueMemOrderLatency;
  }

  void dump();
};

void ScheduleDAGInstrs::addChainDependencies(SUnit *SU,
                                             Value2SUsMap &Val2SUsMap) {
  for (auto &I : Val2SUsMap)
    addChainDependencies(SU, I.second,
                         Val2SUsMap.getTrueMemOrderLatency());
}

void ScheduleDAGInstrs::addChainDependencies(SUnit *SU,
                                             Value2SUsMap &Val2SUsMap,
                                             ValueType V) {
  Value2SUsMap::iterator Itr = Val2SUsMap.find(V);
  if (Itr != Val2SUsMap.end())
    addChainDependencies(SU, Itr->second,
                         Val2SUsMap.getTrueMemOrderLatency());
}

void ScheduleDAGInstrs::addBarrierChain(Value2SUsMap &map) {
  assert(BarrierChain != nullptr);

  for (auto &I : map) {
    SUList &sus = I.second;
    for (auto *SU : sus)
      SU->addPredBarrier(BarrierChain);
  }
  map.clear();
}

void ScheduleDAGInstrs::insertBarrierChain(Value2SUsMap &map) {
  assert(BarrierChain != nullptr);

  // Go through all lists of SUs.
  for (Value2SUsMap::iterator I = map.begin(), EE = map.end(); I != EE;) {
    Value2SUsMap::iterator CurrItr = I++;
    SUList &sus = CurrItr->second;
    SUList::iterator SUItr = sus.begin(), SUEE = sus.end();
    for (; SUItr != SUEE; ++SUItr) {
      // Stop on BarrierChain or any instruction above it.
      if ((*SUItr)->NodeNum <= BarrierChain->NodeNum)
        break;

      (*SUItr)->addPredBarrier(BarrierChain);
    }

    // Also remove the BarrierChain from the list if present. Guard against
    // dereferencing the end iterator when every SU in the list was visited.
    if (SUItr != SUEE && *SUItr == BarrierChain)
      SUItr++;

    // Remove all SUs that are now successors of BarrierChain.
    if (SUItr != sus.begin())
      sus.erase(sus.begin(), SUItr);
  }

  // Remove all entries with empty su lists.
  map.remove_if([&](std::pair<ValueType, SUList> &mapEntry) {
      return (mapEntry.second.empty()); });

  // Recompute the size of the map (NumNodes).
  map.reComputeSize();
}

/// If RegPressure is non-null, compute register pressure as a side effect. The
/// DAG builder is an efficient place to do it because it already visits
/// operands.
void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
                                        RegPressureTracker *RPTracker,
                                        PressureDiffs *PDiffs,
                                        LiveIntervals *LIS,
                                        bool TrackLaneMasks) {
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
                                                       : ST.useAA();
  AAForDep = UseAA ? AA : nullptr;

  BarrierChain = nullptr;

  this->TrackLaneMasks = TrackLaneMasks;
  MISUnitMap.clear();
  ScheduleDAG::clearDAG();

  // Create an SUnit for each real instruction.
  initSUnits();

  if (PDiffs)
    PDiffs->init(SUnits.size());

  // We build scheduling units by walking a block's instruction list
  // from bottom to top.

  // Each MI's memory operand(s) is analyzed to a list of underlying
  // objects. The SU is then inserted in the SUList(s) mapped from the
  // Value(s). Each Value thus gets mapped to lists of SUs depending
  // on it, stores and loads kept separately. Two SUs are trivially
  // non-aliasing if they both depend on only identified Values and do
  // not share any common Value.
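  //
  // For example, two stores to distinct identified allocas end up in
  // separate SULists and never get a chain edge between them, while a
  // store through an unanalyzable pointer is mapped to UnknownValue and
  // chained against everything.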
  Value2SUsMap Stores, Loads(1 /*TrueMemOrderLatency*/);

  // Certain memory accesses are known to not alias any SU in Stores
  // or Loads, and therefore have their own 'NonAlias'
  // domain. E.g. spill / reload instructions never alias LLVM IR
  // Values. It would be nice to assume that this type of memory
  // accesses always have a proper memory operand modelling, and are
  // therefore never unanalyzable, but this is conservatively not
  // done.
  Value2SUsMap NonAliasStores, NonAliasLoads(1 /*TrueMemOrderLatency*/);

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = nullptr;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(CurrentVRegDefs.empty() && "nobody else should use CurrentVRegDefs");
  assert(CurrentVRegUses.empty() && "nobody else should use CurrentVRegUses");
  unsigned NumVirtRegs = MRI.getNumVirtRegs();
  CurrentVRegDefs.setUniverse(NumVirtRegs);
  CurrentVRegUses.setUniverse(NumVirtRegs);

  VRegUses.clear();
  VRegUses.setUniverse(NumVirtRegs);

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  addSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
  MachineInstr *DbgMI = nullptr;
  for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
       MII != MIE; --MII) {
    MachineInstr *MI = std::prev(MII);
    if (MI && DbgMI) {
      DbgValues.push_back(std::make_pair(DbgMI, MI));
      DbgMI = nullptr;
    }

    if (MI->isDebugValue()) {
      DbgMI = MI;
      continue;
    }
    SUnit *SU = MISUnitMap[MI];
    assert(SU && "No SUnit mapped to this MI");

    if (RPTracker) {
      collectVRegUses(SU);

      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, TrackLaneMasks, false);
      if (TrackLaneMasks) {
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI);
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx);
      }
      if (PDiffs != nullptr)
        PDiffs->addInstruction(SU->NodeNum, RegOpers, MRI);

      RPTracker->recedeSkipDebugValues();
      assert(&*RPTracker->getPos() == MI && "RPTracker in sync");
      RPTracker->recede(RegOpers);
    }

    assert(
        (CanHandleTerminators || (!MI->isTerminator() && !MI->isPosition())) &&
        "Cannot schedule terminators or labels!");

    // Add register-based dependencies (data, anti, and output).
    bool HasVRegDef = false;
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        addPhysRegDeps(SU, j);
      else {
        if (MO.isDef()) {
          HasVRegDef = true;
          addVRegDefDeps(SU, j);
        } else if (MO.readsReg()) // ignore undef operands
          addVRegUseDeps(SU, j);
      }
    }
    // If we haven't seen any uses in this scheduling region, create a
    // dependence edge to ExitSU to model the live-out latency. This is required
    // for vreg defs with no in-region use, and prefetches with no vreg def.
    //
    // FIXME: NumDataSuccs would be more precise than NumSuccs here. This
    // check currently relies on being called before adding chain deps.
    if (SU->NumSuccs == 0 && SU->Latency > 1
        && (HasVRegDef || MI->mayLoad())) {
      SDep Dep(SU, SDep::Artificial);
      Dep.setLatency(SU->Latency - 1);
      ExitSU.addPred(Dep);
    }

    // Add memory dependencies (Note: isStoreToStackSlot and
    // isLoadFromStackSlot are not usable after stack slots are lowered to
    // actual addresses).

    // This is a barrier event that acts as a pivotal node in the DAG.
    if (isGlobalMemoryObject(AA, MI)) {

      // Become the barrier chain.
      if (BarrierChain)
        BarrierChain->addPredBarrier(SU);
      BarrierChain = SU;

      DEBUG(dbgs() << "Global memory object and new barrier chain: SU("
            << BarrierChain->NodeNum << ").\n";);

      // Add dependencies against everything below it and clear maps.
      addBarrierChain(Stores);
      addBarrierChain(Loads);
      addBarrierChain(NonAliasStores);
      addBarrierChain(NonAliasLoads);

      continue;
    }

    // If it's not a store or a variant load, we're done.
    if (!MI->mayStore() && !(MI->mayLoad() && !MI->isInvariantLoad(AA)))
      continue;

    // Always add a dependency edge to BarrierChain if present.
    if (BarrierChain)
      BarrierChain->addPredBarrier(SU);

    // Find the underlying objects for MI. The Objs vector is either
    // empty, or filled with the Values of memory locations which this
    // SU depends on. An empty vector means the memory location is
    // unknown, and may alias anything.
    UnderlyingObjectsVector Objs;
    getUnderlyingObjectsForInstr(MI, MFI, Objs, MF.getDataLayout());

    if (MI->mayStore()) {
      if (Objs.empty()) {
        // An unknown store depends on all stores and loads.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);
        addChainDependencies(SU, Loads);
        addChainDependencies(SU, NonAliasLoads);

        // Map this store to 'UnknownValue'.
        Stores.insert(SU, UnknownValue);
      } else {
        // Add precise dependencies against all previously seen memory
        // accesses mapped to the same Value(s).
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add dependencies to previous stores and loads mapped to V.
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);
          addChainDependencies(SU, (ThisMayAlias ? Loads : NonAliasLoads), V);
        }
        // Update the store map after all chains have been added to avoid
        // adding a self-loop edge if multiple underlying objects are present.
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Map this store to V.
          (ThisMayAlias ? Stores : NonAliasStores).insert(SU, V);
        }
        // The store may have dependencies to unanalyzable loads and
        // stores.
        addChainDependencies(SU, Loads, UnknownValue);
        addChainDependencies(SU, Stores, UnknownValue);
      }
    } else { // SU is a load.
      if (Objs.empty()) {
        // An unknown load depends on all stores.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);

        Loads.insert(SU, UnknownValue);
      } else {
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add precise dependencies against all previously seen stores
          // mapping to the same Value(s).
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);

          // Map this load to V.
          (ThisMayAlias ? Loads : NonAliasLoads).insert(SU, V);
        }
        // The load may have dependencies to unanalyzable stores.
        addChainDependencies(SU, Stores, UnknownValue);
      }
    }

    // Reduce maps if they grow huge.
    if (Stores.size() + Loads.size() >= HugeRegion) {
      DEBUG(dbgs() << "Reducing Stores and Loads maps.\n";);
      reduceHugeMemNodeMaps(Stores, Loads, getReductionSize());
    }
    if (NonAliasStores.size() + NonAliasLoads.size() >= HugeRegion) {
      DEBUG(dbgs() << "Reducing NonAliasStores and NonAliasLoads maps.\n";);
      reduceHugeMemNodeMaps(NonAliasStores, NonAliasLoads, getReductionSize());
    }
  }

  if (DbgMI)
    FirstDbgValue = DbgMI;

  Defs.clear();
  Uses.clear();
  CurrentVRegDefs.clear();
  CurrentVRegUses.clear();
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const PseudoSourceValue *PSV) {
  PSV->printCustom(OS);
  return OS;
}

void ScheduleDAGInstrs::Value2SUsMap::dump() {
  for (auto &Itr : *this) {
    if (Itr.first.is<const Value*>()) {
      const Value *V = Itr.first.get<const Value*>();
      if (isa<UndefValue>(V))
        dbgs() << "Unknown";
      else
        V->printAsOperand(dbgs());
    } else if (Itr.first.is<const PseudoSourceValue*>())
      dbgs() << Itr.first.get<const PseudoSourceValue*>();
    else
      llvm_unreachable("Unknown Value type.");

    dbgs() << " : ";
    dumpSUList(Itr.second);
  }
}

/// Reduce maps in FIFO order, by N SUs. This is better than turning
/// every Nth memory SU into BarrierChain in buildSchedGraph(), since
/// it avoids unnecessary edges between seen SUs above the new
/// BarrierChain, and those below it.
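/// For example, with N == 500 the 500 highest-numbered memory SUs are
/// dropped from the maps, and the lowest-numbered SU among them becomes the
/// new BarrierChain so that SUs seen later still get a dependency to the
/// removed ones.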
void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
                                              Value2SUsMap &loads, unsigned N) {
  DEBUG(dbgs() << "Before reduction:\nStoring SUnits:\n";
        stores.dump();
        dbgs() << "Loading SUnits:\n";
        loads.dump());

  // Insert all SU's NodeNums into a vector and sort it.
  std::vector<unsigned> NodeNums;
  NodeNums.reserve(stores.size() + loads.size());
  for (auto &I : stores)
    for (auto *SU : I.second)
      NodeNums.push_back(SU->NodeNum);
  for (auto &I : loads)
    for (auto *SU : I.second)
      NodeNums.push_back(SU->NodeNum);
  std::sort(NodeNums.begin(), NodeNums.end());

  // The N last elements in NodeNums will be removed, and the SU with
  // the lowest NodeNum of them will become the new BarrierChain to
  // let the not yet seen SUs have a dependency to the removed SUs.
  assert(N <= NodeNums.size());
  SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)];
  if (BarrierChain) {
    // The aliasing and non-aliasing maps reduce independently of each
    // other, but share a common BarrierChain. Check if the
    // newBarrierChain is above the former one. If it is not, it may
    // introduce a loop to use newBarrierChain, so keep the old one.
    if (newBarrierChain->NodeNum < BarrierChain->NodeNum) {
      BarrierChain->addPredBarrier(newBarrierChain);
      BarrierChain = newBarrierChain;
      DEBUG(dbgs() << "Inserting new barrier chain: SU("
            << BarrierChain->NodeNum << ").\n";);
    } else
      DEBUG(dbgs() << "Keeping old barrier chain: SU("
            << BarrierChain->NodeNum << ").\n";);
  } else
    BarrierChain = newBarrierChain;

  insertBarrierChain(stores);
  insertBarrierChain(loads);

  DEBUG(dbgs() << "After reduction:\nStoring SUnits:\n";
        stores.dump();
        dbgs() << "Loading SUnits:\n";
        loads.dump());
}

/// \brief Initialize register live-range state for updating kills.
void ScheduleDAGInstrs::startBlockForKills(MachineBasicBlock *BB) {
  // Start with no live registers.
  LiveRegs.reset();

  // Examine the live-in regs of all successors.
  for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
       SE = BB->succ_end(); SI != SE; ++SI) {
    for (const auto &LI : (*SI)->liveins()) {
      // Repeat, for reg and all subregs.
      for (MCSubRegIterator SubRegs(LI.PhysReg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

/// \brief If we change a kill flag on the bundle instruction implicit register
/// operands, then we also need to propagate that to any instructions inside
/// the bundle which had the same kill state.
static void toggleBundleKillFlag(MachineInstr *MI, unsigned Reg,
                                 bool NewKillState) {
  if (MI->getOpcode() != TargetOpcode::BUNDLE)
    return;

  // Walk backwards from the last instruction in the bundle to the first.
  // Once we set a kill flag on an instruction, we bail out, as otherwise we
  // might set it on too many operands.  We will clear as many flags as we
  // can though.
  MachineBasicBlock::instr_iterator Begin = MI->getIterator();
  MachineBasicBlock::instr_iterator End = getBundleEnd(*MI);
  while (Begin != End) {
    for (MachineOperand &MO : (--End)->operands()) {
      if (!MO.isReg() || MO.isDef() || Reg != MO.getReg())
        continue;

      // DEBUG_VALUE nodes do not contribute to code generation and should
      // always be ignored.  Failure to do so may result in trying to modify
      // KILL flags on DEBUG_VALUE nodes, which is distressing.
      if (MO.isDebug())
        continue;

      // If the register has the internal flag then it could be killing an
      // internal def of the register.  In this case, just skip.  We only want
      // to toggle the flag on operands visible outside the bundle.
      if (MO.isInternalRead())
        continue;

      if (MO.isKill() == NewKillState)
        continue;
      MO.setIsKill(NewKillState);
      if (NewKillState)
        return;
    }
  }
}

bool ScheduleDAGInstrs::toggleKillFlag(MachineInstr *MI, MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    toggleBundleKillFlag(MI, MO.getReg(), true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (LiveRegs.test(MO.getReg())) {
    MO.setIsKill(false);
    toggleBundleKillFlag(MI, MO.getReg(), false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  toggleBundleKillFlag(MI, MO.getReg(), false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  MachineInstrBuilder MIB(MF, MI);
  for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
    if (LiveRegs.test(*SubRegs)) {
      MIB.addReg(*SubRegs, RegState::ImplicitDefine);
      AllDead = false;
    }
  }

  if (AllDead) {
    MO.setIsKill(true);
    toggleBundleKillFlag(MI, MO.getReg(), true);
  }
  return false;
}

// FIXME: Reuse the LivePhysRegs utility for this.
void ScheduleDAGInstrs::fixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  LiveRegs.resize(TRI->getNumRegs());
  BitVector killedRegs(TRI->getNumRegs());

  startBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness.  Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegMask())
        LiveRegs.clearBitsNotInMask(MO.getRegMask());
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      // Repeat for reg and all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.reset(*SubRegs);
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use. Don't set kill flags on undef operands.
    killedRegs.reset();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      bool kill = false;
      if (!killedRegs.test(Reg)) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
          if (LiveRegs.test(*SubRegs)) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction.
        if (kill)
          kill = !LiveRegs.test(Reg);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: toggleKillFlag may invalidate MO.
        toggleKillFlag(MI, MO);
        DEBUG(MI->dump());
        DEBUG(if (MI->getOpcode() == TargetOpcode::BUNDLE) {
          MachineBasicBlock::instr_iterator Begin = MI->getIterator();
          MachineBasicBlock::instr_iterator End = getBundleEnd(*MI);
          while (++Begin != End)
            DEBUG(Begin->dump());
        });
      }

      killedRegs.set(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  SU->getInstr()->dump();
#endif
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss, /*SkipOpers=*/true);
  return oss.str();
}

/// Return the basic block label. It is not necessarily unique because a block
/// contains multiple scheduling regions. But it is fine for visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
  return "dag." + BB->getFullName();
}
1390 
1391 //===----------------------------------------------------------------------===//
1392 // SchedDFSResult Implementation
1393 //===----------------------------------------------------------------------===//
1394 
1395 namespace llvm {
1396 /// \brief Internal state used to compute SchedDFSResult.
1397 class SchedDFSImpl {
1398   SchedDFSResult &R;
1399 
1400   /// Join DAG nodes into equivalence classes by their subtree.
1401   IntEqClasses SubtreeClasses;
1402   /// List PredSU, SuccSU pairs that represent data edges between subtrees.
1403   std::vector<std::pair<const SUnit*, const SUnit*> > ConnectionPairs;

  struct RootData {
    unsigned NodeID;
    unsigned ParentNodeID;  // Parent node (member of the parent subtree).
    unsigned SubInstrCount; // Instr count in this tree only, not children.

    RootData(unsigned id): NodeID(id),
                           ParentNodeID(SchedDFSResult::InvalidSubtreeID),
                           SubInstrCount(0) {}

    unsigned getSparseSetIndex() const { return NodeID; }
  };

  SparseSet<RootData> RootSet;

public:
  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
    RootSet.setUniverse(R.DFSNodeData.size());
  }


  /// Return true if this node has been visited by the DFS traversal.
  ///
  /// During visitPostorderNode the node's SubtreeID is set to its own node
  /// ID. Later, SubtreeID may be updated but remains valid.
  bool isVisited(const SUnit *SU) const {
    return R.DFSNodeData[SU->NodeNum].SubtreeID
      != SchedDFSResult::InvalidSubtreeID;
  }

  /// Initialize this node's instruction count. We don't need to flag the node
  /// visited until visitPostorderNode because the DAG cannot have cycles.
  void visitPreorder(const SUnit *SU) {
    R.DFSNodeData[SU->NodeNum].InstrCount =
      SU->getInstr()->isTransient() ? 0 : 1;
  }

  /// Called once for each node after all predecessors are visited. Revisit this
  /// node's predecessors and potentially join them now that we know the ILP of
  /// the other predecessors.
  void visitPostorderNode(const SUnit *SU) {
    // Mark this node as the root of a subtree. It may be joined with its
    // successors later.
    R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum;
    RootData RData(SU->NodeNum);
    RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1;

    // If any predecessors are still in their own subtree, they either cannot be
    // joined or are large enough to remain separate. If this parent node's
    // total instruction count is not greater than a child subtree by at least
    // the subtree limit, then try to join it now since splitting subtrees is
    // only useful if multiple high-pressure paths are possible.
    unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount;
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->getKind() != SDep::Data)
        continue;
      unsigned PredNum = PI->getSUnit()->NodeNum;
      if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit)
        joinPredSubtree(*PI, SU, /*CheckLimit=*/false);

      // Either link or merge the TreeData entry from the child to the parent.
      if (R.DFSNodeData[PredNum].SubtreeID == PredNum) {
        // If the predecessor's parent is invalid, this is a tree edge and the
        // current node is the parent.
        if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID)
          RootSet[PredNum].ParentNodeID = SU->NodeNum;
      } else if (RootSet.count(PredNum)) {
        // The predecessor is not a root, but is still in the root set. This
        // must be the new parent that it was just joined to. Note that
        // RootSet[PredNum].ParentNodeID may either be invalid or may still be
        // set to the original parent.
        RData.SubInstrCount += RootSet[PredNum].SubInstrCount;
        RootSet.erase(PredNum);
      }
    }
    RootSet[SU->NodeNum] = RData;
  }

  /// Called once for each tree edge after calling visitPostorderNode on the
  /// predecessor. Increment the parent node's instruction count and
  /// preemptively join this subtree to its parent's if it is small enough.
  void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) {
    R.DFSNodeData[Succ->NodeNum].InstrCount
      += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount;
    joinPredSubtree(PredDep, Succ);
  }

  /// Add a connection for cross edges.
  void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) {
    ConnectionPairs.push_back(std::make_pair(PredDep.getSUnit(), Succ));
  }

  /// Set each node's subtree ID to the representative ID and record connections
  /// between trees.
  void finalize() {
    SubtreeClasses.compress();
    R.DFSTreeData.resize(SubtreeClasses.getNumClasses());
    assert(SubtreeClasses.getNumClasses() == RootSet.size() &&
           "number of roots should match trees");
    for (SparseSet<RootData>::const_iterator
           RI = RootSet.begin(), RE = RootSet.end(); RI != RE; ++RI) {
      unsigned TreeID = SubtreeClasses[RI->NodeID];
      if (RI->ParentNodeID != SchedDFSResult::InvalidSubtreeID)
        R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[RI->ParentNodeID];
      R.DFSTreeData[TreeID].SubInstrCount = RI->SubInstrCount;
      // Note that SubInstrCount may be greater than InstrCount if we joined
      // subtrees across a cross edge. InstrCount will be attributed to the
      // original parent, while SubInstrCount will be attributed to the joined
      // parent.
    }
    R.SubtreeConnections.resize(SubtreeClasses.getNumClasses());
    R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses());
    DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
    for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) {
      R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx];
      DEBUG(dbgs() << "  SU(" << Idx << ") in tree "
            << R.DFSNodeData[Idx].SubtreeID << '\n');
    }
    for (std::vector<std::pair<const SUnit*, const SUnit*>>::const_iterator
           I = ConnectionPairs.begin(), E = ConnectionPairs.end();
         I != E; ++I) {
      unsigned PredTree = SubtreeClasses[I->first->NodeNum];
      unsigned SuccTree = SubtreeClasses[I->second->NodeNum];
      if (PredTree == SuccTree)
        continue;
      unsigned Depth = I->first->getDepth();
      addConnection(PredTree, SuccTree, Depth);
      addConnection(SuccTree, PredTree, Depth);
    }
  }

protected:
  /// Join the predecessor subtree with the successor that is its DFS
  /// parent. Apply some heuristics before joining.
  bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ,
                       bool CheckLimit = true) {
    assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges");

    // Check if the predecessor is already joined.
    const SUnit *PredSU = PredDep.getSUnit();
    unsigned PredNum = PredSU->NodeNum;
    if (R.DFSNodeData[PredNum].SubtreeID != PredNum)
      return false;

    // Heuristic: a node with four or more data successors is considered a
    // pinch point; do not join subtrees across it.
    unsigned NumDataSucs = 0;
    for (SUnit::const_succ_iterator SI = PredSU->Succs.begin(),
           SE = PredSU->Succs.end(); SI != SE; ++SI) {
      if (SI->getKind() == SDep::Data) {
        if (++NumDataSucs >= 4)
          return false;
      }
    }
    if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit)
      return false;
    R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum;
    SubtreeClasses.join(Succ->NodeNum, PredNum);
    return true;
  }

  /// Called by finalize() to record a connection between trees.
  void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) {
    if (!Depth)
      return;

    do {
      SmallVectorImpl<SchedDFSResult::Connection> &Connections =
        R.SubtreeConnections[FromTree];
      for (SmallVectorImpl<SchedDFSResult::Connection>::iterator
             I = Connections.begin(), E = Connections.end(); I != E; ++I) {
        if (I->TreeID == ToTree) {
          I->Level = std::max(I->Level, Depth);
          return;
        }
      }
      Connections.push_back(SchedDFSResult::Connection(ToTree, Depth));
      FromTree = R.DFSTreeData[FromTree].ParentTreeID;
    } while (FromTree != SchedDFSResult::InvalidSubtreeID);
  }
};
} // namespace llvm

namespace {
/// \brief Manage the stack used by a reverse depth-first search over the DAG.
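///
/// A sketch of the intended traversal loop (the canonical driver is
/// SchedDFSResult::compute below; Root is an illustrative name):
/// \code
///   SchedDAGReverseDFS DFS;
///   DFS.follow(Root);
///   for (;;) {
///     while (DFS.getPred() != DFS.getPredEnd())
///       DFS.advance();                  // or DFS.follow(...) to descend
///     const SUnit *Child = DFS.getCurr();
///     const SDep *PredDep = DFS.backtrack();
///     // ... postorder work on Child and PredDep goes here ...
///     if (DFS.isComplete())
///       break;
///   }
/// \endcode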
class SchedDAGReverseDFS {
  std::vector<std::pair<const SUnit*, SUnit::const_pred_iterator>> DFSStack;
public:
  bool isComplete() const { return DFSStack.empty(); }

  void follow(const SUnit *SU) {
    DFSStack.push_back(std::make_pair(SU, SU->Preds.begin()));
  }
  void advance() { ++DFSStack.back().second; }

  const SDep *backtrack() {
    DFSStack.pop_back();
    return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second);
  }

  const SUnit *getCurr() const { return DFSStack.back().first; }

  SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }

  SUnit::const_pred_iterator getPredEnd() const {
    return getCurr()->Preds.end();
  }
};
} // end anonymous namespace

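/// Return true if \p SU has a data successor inside the scheduling region,
/// i.e. one that is not a boundary node. Nodes with no such successor are the
/// data roots that seed the bottom-up DFS in SchedDFSResult::compute.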
static bool hasDataSucc(const SUnit *SU) {
  for (SUnit::const_succ_iterator
         SI = SU->Succs.begin(), SE = SU->Succs.end(); SI != SE; ++SI) {
    if (SI->getKind() == SDep::Data && !SI->getSUnit()->isBoundaryNode())
      return true;
  }
  return false;
}

/// Compute an ILP metric for all nodes in the subDAG reachable via depth-first
/// search from this root.
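///
/// The metric for each node is reported as InstrCount / Length (see
/// ILPValue::print below); for example, a subtree holding six instructions
/// over a path of length three yields an ILP of 6 / 3 = 2.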
void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
  if (!IsBottomUp)
    llvm_unreachable("Top-down ILP metric is unimplemented");

  SchedDFSImpl Impl(*this);
  for (ArrayRef<SUnit>::const_iterator
         SI = SUnits.begin(), SE = SUnits.end(); SI != SE; ++SI) {
    const SUnit *SU = &*SI;
    if (Impl.isVisited(SU) || hasDataSucc(SU))
      continue;

    SchedDAGReverseDFS DFS;
    Impl.visitPreorder(SU);
    DFS.follow(SU);
    for (;;) {
      // Traverse the leftmost path as far as possible.
      while (DFS.getPred() != DFS.getPredEnd()) {
        const SDep &PredDep = *DFS.getPred();
        DFS.advance();
        // Ignore non-data edges.
        if (PredDep.getKind() != SDep::Data
            || PredDep.getSUnit()->isBoundaryNode()) {
          continue;
        }
        // An edge to an already visited node is a cross edge, assuming an
        // acyclic DAG.
        if (Impl.isVisited(PredDep.getSUnit())) {
          Impl.visitCrossEdge(PredDep, DFS.getCurr());
          continue;
        }
        Impl.visitPreorder(PredDep.getSUnit());
        DFS.follow(PredDep.getSUnit());
      }
      // Visit the top of the stack in postorder and backtrack.
      const SUnit *Child = DFS.getCurr();
      const SDep *PredDep = DFS.backtrack();
      Impl.visitPostorderNode(Child);
      if (PredDep)
        Impl.visitPostorderEdge(*PredDep, DFS.getCurr());
      if (DFS.isComplete())
        break;
    }
  }
  Impl.finalize();
}

/// The root of the given SubtreeID was just scheduled. For all subtrees
/// connected to this tree, record the depth of the connection so that the
/// nearest connected subtrees can be prioritized.
void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
  for (SmallVectorImpl<Connection>::const_iterator
         I = SubtreeConnections[SubtreeID].begin(),
         E = SubtreeConnections[SubtreeID].end(); I != E; ++I) {
    SubtreeConnectLevels[I->TreeID] =
      std::max(SubtreeConnectLevels[I->TreeID], I->Level);
    DEBUG(dbgs() << "  Tree: " << I->TreeID
          << " @" << SubtreeConnectLevels[I->TreeID] << '\n');
  }
}

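/// Print this ILP value as "InstrCount / Length = ratio". A Length of zero is
/// reported as BADILP; for example, InstrCount = 6 and Length = 3 prints
/// "6 / 3 = 2".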
LLVM_DUMP_METHOD
void ILPValue::print(raw_ostream &OS) const {
  OS << InstrCount << " / " << Length << " = ";
  if (!Length)
    OS << "BADILP";
  else
    OS << format("%g", ((double)InstrCount / Length));
}

LLVM_DUMP_METHOD
void ILPValue::dump() const {
  dbgs() << *this << '\n';
}

namespace llvm {

LLVM_DUMP_METHOD
raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
  Val.print(OS);
  return OS;
}

} // namespace llvm