1 //===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // The machine combiner pass uses machine trace metrics to ensure the combined
11 // instructions do not lengthen the critical path or the resource depth.
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/Statistic.h"
16 #include "llvm/CodeGen/MachineDominators.h"
17 #include "llvm/CodeGen/MachineFunction.h"
18 #include "llvm/CodeGen/MachineFunctionPass.h"
19 #include "llvm/CodeGen/MachineLoopInfo.h"
20 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 #include "llvm/CodeGen/MachineTraceMetrics.h"
22 #include "llvm/CodeGen/Passes.h"
23 #include "llvm/CodeGen/TargetInstrInfo.h"
24 #include "llvm/CodeGen/TargetRegisterInfo.h"
25 #include "llvm/CodeGen/TargetSchedule.h"
26 #include "llvm/CodeGen/TargetSubtargetInfo.h"
27 #include "llvm/Support/CommandLine.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/raw_ostream.h"
30 
31 using namespace llvm;
32 
33 #define DEBUG_TYPE "machine-combiner"
34 
STATISTIC(NumInstCombined, "Number of machine instructions combined");
36 
37 static cl::opt<unsigned>
38 inc_threshold("machine-combiner-inc-threshold", cl::Hidden,
39               cl::desc("Incremental depth computation will be used for basic "
40                        "blocks with more instructions."), cl::init(500));
41 
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
#ifdef EXPENSIVE_CHECKS
    cl::init(true));
#else
    cl::init(false));
#endif
55 
56 namespace {
57 class MachineCombiner : public MachineFunctionPass {
58   const TargetInstrInfo *TII;
59   const TargetRegisterInfo *TRI;
60   MCSchedModel SchedModel;
61   MachineRegisterInfo *MRI;
62   MachineLoopInfo *MLI; // Current MachineLoopInfo
63   MachineTraceMetrics *Traces;
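  // Lazily created trace ensemble using the minimum-instruction-count
  // strategy (TS_MinInstrCount).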
64   MachineTraceMetrics::Ensemble *MinInstr;
65 
66   TargetSchedModel TSchedModel;
67 
68   /// True if optimizing for code size.
69   bool OptSize;
70 
71 public:
72   static char ID;
73   MachineCombiner() : MachineFunctionPass(ID) {
74     initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
75   }
76   void getAnalysisUsage(AnalysisUsage &AU) const override;
77   bool runOnMachineFunction(MachineFunction &MF) override;
78   StringRef getPassName() const override { return "Machine InstCombiner"; }
79 
80 private:
81   bool doSubstitute(unsigned NewSize, unsigned OldSize);
82   bool combineInstructions(MachineBasicBlock *);
83   MachineInstr *getOperandDef(const MachineOperand &MO);
84   unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
85                     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
86                     MachineTraceMetrics::Trace BlockTrace);
87   unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
88                       MachineTraceMetrics::Trace BlockTrace);
89   bool
90   improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
91                           MachineTraceMetrics::Trace BlockTrace,
92                           SmallVectorImpl<MachineInstr *> &InsInstrs,
93                           SmallVectorImpl<MachineInstr *> &DelInstrs,
94                           DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
95                           MachineCombinerPattern Pattern, bool SlackIsAccurate);
96   bool preservesResourceLen(MachineBasicBlock *MBB,
97                             MachineTraceMetrics::Trace BlockTrace,
98                             SmallVectorImpl<MachineInstr *> &InsInstrs,
99                             SmallVectorImpl<MachineInstr *> &DelInstrs);
100   void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
101                      SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
102   std::pair<unsigned, unsigned>
103   getLatenciesForInstrSequences(MachineInstr &MI,
104                                 SmallVectorImpl<MachineInstr *> &InsInstrs,
105                                 SmallVectorImpl<MachineInstr *> &DelInstrs,
106                                 MachineTraceMetrics::Trace BlockTrace);
107 
108   void verifyPatternOrder(MachineBasicBlock *MBB, MachineInstr &Root,
109                           SmallVector<MachineCombinerPattern, 16> &Patterns);
110 };
111 }
112 
113 char MachineCombiner::ID = 0;
114 char &llvm::MachineCombinerID = MachineCombiner::ID;
115 
116 INITIALIZE_PASS_BEGIN(MachineCombiner, DEBUG_TYPE,
117                       "Machine InstCombiner", false, false)
118 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
119 INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
120 INITIALIZE_PASS_END(MachineCombiner, DEBUG_TYPE, "Machine InstCombiner",
121                     false, false)
122 
123 void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
124   AU.setPreservesCFG();
125   AU.addPreserved<MachineDominatorTree>();
126   AU.addRequired<MachineLoopInfo>();
127   AU.addPreserved<MachineLoopInfo>();
128   AU.addRequired<MachineTraceMetrics>();
129   AU.addPreserved<MachineTraceMetrics>();
130   MachineFunctionPass::getAnalysisUsage(AU);
131 }
132 
133 MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
134   MachineInstr *DefInstr = nullptr;
135   // We need a virtual register definition.
136   if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
137     DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  // PHIs have no depth and no latency information.
139   if (DefInstr && DefInstr->isPHI())
140     DefInstr = nullptr;
141   return DefInstr;
142 }
143 
/// Computes depth of instructions in vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map from virtual register to the
/// index of its defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of the last instruction in \p InsInstrs ("NewRoot")
152 unsigned
153 MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
154                           DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
155                           MachineTraceMetrics::Trace BlockTrace) {
156   SmallVector<unsigned, 16> InstrDepth;
157   assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
158          "Missing machine model\n");
159 
  // For each instruction in the new sequence, compute the depth based on its
  // operands. Use the trace information when possible. For new operands that
  // are tracked in the InstrIdxForVirtReg map, the depth is looked up in
  // InstrDepth.
163   for (auto *InstrPtr : InsInstrs) { // for each Use
164     unsigned IDepth = 0;
165     DEBUG(dbgs() << "NEW INSTR ";
166           InstrPtr->print(dbgs(), TII);
167           dbgs() << "\n";);
168     for (const MachineOperand &MO : InstrPtr->operands()) {
169       // Check for virtual register operand.
170       if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
171         continue;
172       if (!MO.isUse())
173         continue;
174       unsigned DepthOp = 0;
175       unsigned LatencyOp = 0;
176       DenseMap<unsigned, unsigned>::iterator II =
177           InstrIdxForVirtReg.find(MO.getReg());
178       if (II != InstrIdxForVirtReg.end()) {
        // Operand is a new virtual register that is not yet in the trace.
180         assert(II->second < InstrDepth.size() && "Bad Index");
181         MachineInstr *DefInstr = InsInstrs[II->second];
182         assert(DefInstr &&
183                "There must be a definition for a new virtual register");
184         DepthOp = InstrDepth[II->second];
185         int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg());
186         int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg());
187         LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
188                                                       InstrPtr, UseIdx);
189       } else {
190         MachineInstr *DefInstr = getOperandDef(MO);
191         if (DefInstr) {
192           DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
193           LatencyOp = TSchedModel.computeOperandLatency(
194               DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
195               InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
196         }
197       }
198       IDepth = std::max(IDepth, DepthOp + LatencyOp);
199     }
200     InstrDepth.push_back(IDepth);
201   }
202   unsigned NewRootIdx = InsInstrs.size() - 1;
203   return InstrDepth[NewRootIdx];
204 }
205 
/// Computes instruction latency as the maximum latency of defined operands.
207 ///
208 /// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute more accurate latency information for NewRoot in
210 /// case there is a dependent instruction in the same trace (\p BlockTrace)
211 /// \param NewRoot is the instruction for which the latency is computed
212 /// \param BlockTrace is a trace of machine instructions
213 ///
214 /// \returns Latency of \p NewRoot
215 unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
216                                      MachineTraceMetrics::Trace BlockTrace) {
217   assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
218          "Missing machine model\n");
219 
220   // Check each definition in NewRoot and compute the latency
221   unsigned NewRootLatency = 0;
222 
223   for (const MachineOperand &MO : NewRoot->operands()) {
224     // Check for virtual register operand.
225     if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
226       continue;
227     if (!MO.isDef())
228       continue;
229     // Get the first instruction that uses MO
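    // Note: this assumes the defining operand is first on the register's
    // use-def chain, so advancing past it yields the first user, and that at
    // least one user of MO exists.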
230     MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
231     RI++;
232     MachineInstr *UseMO = RI->getParent();
233     unsigned LatencyOp = 0;
234     if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
235       LatencyOp = TSchedModel.computeOperandLatency(
236           NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
237           UseMO->findRegisterUseOperandIdx(MO.getReg()));
238     } else {
239       LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
240     }
241     NewRootLatency = std::max(NewRootLatency, LatencyOp);
242   }
243   return NewRootLatency;
244 }
245 
246 /// The combiner's goal may differ based on which pattern it is attempting
247 /// to optimize.
248 enum class CombinerObjective {
249   MustReduceDepth, // The data dependency chain must be improved.
250   Default          // The critical path must not be lengthened.
251 };
252 
253 static CombinerObjective getCombinerObjective(MachineCombinerPattern P) {
254   // TODO: If C++ ever gets a real enum class, make this part of the
255   // MachineCombinerPattern class.
256   switch (P) {
257   case MachineCombinerPattern::REASSOC_AX_BY:
258   case MachineCombinerPattern::REASSOC_AX_YB:
259   case MachineCombinerPattern::REASSOC_XA_BY:
260   case MachineCombinerPattern::REASSOC_XA_YB:
261     return CombinerObjective::MustReduceDepth;
262   default:
263     return CombinerObjective::Default;
264   }
265 }
266 
/// Estimate the latency of the new and original instruction sequences by
/// summing up the latencies of the inserted and deleted instructions. This
/// assumes that the inserted and deleted instructions are dependent
/// instruction chains, which might not hold in all cases.
271 std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(
272     MachineInstr &MI, SmallVectorImpl<MachineInstr *> &InsInstrs,
273     SmallVectorImpl<MachineInstr *> &DelInstrs,
274     MachineTraceMetrics::Trace BlockTrace) {
275   assert(!InsInstrs.empty() && "Only support sequences that insert instrs.");
276   unsigned NewRootLatency = 0;
277   // NewRoot is the last instruction in the \p InsInstrs vector.
278   MachineInstr *NewRoot = InsInstrs.back();
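  // Sum the latencies of the instructions preceding NewRoot, assuming they
  // form a dependent chain (see the note above); NewRoot itself gets a more
  // precise, trace-based latency from getLatency() below.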
279   for (unsigned i = 0; i < InsInstrs.size() - 1; i++)
280     NewRootLatency += TSchedModel.computeInstrLatency(InsInstrs[i]);
281   NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);
282 
283   unsigned RootLatency = 0;
284   for (auto I : DelInstrs)
285     RootLatency += TSchedModel.computeInstrLatency(I);
286 
287   return {NewRootLatency, RootLatency};
288 }
289 
290 /// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
291 /// The new code sequence ends in MI NewRoot. A necessary condition for the new
292 /// sequence to replace the old sequence is that it cannot lengthen the critical
293 /// path. The definition of "improve" may be restricted by specifying that the
294 /// new path improves the data dependency chain (MustReduceDepth).
295 bool MachineCombiner::improvesCriticalPathLen(
296     MachineBasicBlock *MBB, MachineInstr *Root,
297     MachineTraceMetrics::Trace BlockTrace,
298     SmallVectorImpl<MachineInstr *> &InsInstrs,
299     SmallVectorImpl<MachineInstr *> &DelInstrs,
300     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
301     MachineCombinerPattern Pattern,
302     bool SlackIsAccurate) {
303   assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
304          "Missing machine model\n");
305   // Get depth and latency of NewRoot and Root.
306   unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
307   unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;
308 
309   DEBUG(dbgs() << "DEPENDENCE DATA FOR " << *Root << "\n";
310         dbgs() << " NewRootDepth: " << NewRootDepth << "\n";
311         dbgs() << " RootDepth: " << RootDepth << "\n");
312 
313   // For a transform such as reassociation, the cost equation is
314   // conservatively calculated so that we must improve the depth (data
315   // dependency cycles) in the critical path to proceed with the transform.
316   // Being conservative also protects against inaccuracies in the underlying
317   // machine trace metrics and CPU models.
318   if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth)
319     return NewRootDepth < RootDepth;
320 
321   // A more flexible cost calculation for the critical path includes the slack
322   // of the original code sequence. This may allow the transform to proceed
323   // even if the instruction depths (data dependency cycles) become worse.
324 
  // Account for the latency of the inserted and deleted instructions by
  // summing up the latencies of the new and the original code sequences.
326   unsigned NewRootLatency, RootLatency;
327   std::tie(NewRootLatency, RootLatency) =
328       getLatenciesForInstrSequences(*Root, InsInstrs, DelInstrs, BlockTrace);
329 
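  // Slack is the number of cycles by which Root could be delayed without
  // lengthening the critical path.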
330   unsigned RootSlack = BlockTrace.getInstrSlack(*Root);
331   unsigned NewCycleCount = NewRootDepth + NewRootLatency;
332   unsigned OldCycleCount = RootDepth + RootLatency +
333                            (SlackIsAccurate ? RootSlack : 0);
334   DEBUG(dbgs() << " NewRootLatency: " << NewRootLatency << "\n";
335         dbgs() << " RootLatency: " << RootLatency << "\n";
336         dbgs() << " RootSlack: " << RootSlack << " SlackIsAccurate="
337                << SlackIsAccurate << "\n";
338         dbgs() << " NewRootDepth + NewRootLatency = "
339                << NewCycleCount << "\n";
340         dbgs() << " RootDepth + RootLatency + RootSlack = "
341                << OldCycleCount << "\n";
342         );
343 
344   return NewCycleCount <= OldCycleCount;
345 }
346 
/// Helper routine to convert instructions into their scheduling classes (SC).
348 void MachineCombiner::instr2instrSC(
349     SmallVectorImpl<MachineInstr *> &Instrs,
350     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
351   for (auto *InstrPtr : Instrs) {
352     unsigned Opc = InstrPtr->getOpcode();
353     unsigned Idx = TII->get(Opc).getSchedClass();
354     const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
355     InstrsSC.push_back(SC);
356   }
357 }
358 
359 /// True when the new instructions do not increase resource length
360 bool MachineCombiner::preservesResourceLen(
361     MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
362     SmallVectorImpl<MachineInstr *> &InsInstrs,
363     SmallVectorImpl<MachineInstr *> &DelInstrs) {
364   if (!TSchedModel.hasInstrSchedModel())
365     return true;
366 
  // Compute the current resource length.
  SmallVector<const MachineBasicBlock *, 1> MBBarr;
371   MBBarr.push_back(MBB);
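  // The resource length is the number of cycles the trace needs based on its
  // processor resource usage alone, independent of data dependences.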
372   unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);
373 
  // Work with scheduling classes (SC) rather than individual instructions.
375   SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
376   SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;
377 
378   instr2instrSC(InsInstrs, InsInstrsSC);
379   instr2instrSC(DelInstrs, DelInstrsSC);
380 
381   ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
382   ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);
383 
384   // Compute new resource length.
385   unsigned ResLenAfterCombine =
386       BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);
387 
388   DEBUG(dbgs() << "RESOURCE DATA: \n";
389         dbgs() << " resource len before: " << ResLenBeforeCombine
390                << " after: " << ResLenAfterCombine << "\n";);
391 
392   return ResLenAfterCombine <= ResLenBeforeCombine;
393 }
394 
/// \returns true when the new instruction sequence should be generated,
/// regardless of whether it lengthens the critical path or not
397 bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize) {
398   if (OptSize && (NewSize < OldSize))
399     return true;
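  // Without a scheduling model or itineraries there is no way to evaluate the
  // latency trade-off, so substitute unconditionally.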
400   if (!TSchedModel.hasInstrSchedModelOrItineraries())
401     return true;
402   return false;
403 }
404 
405 /// Inserts InsInstrs and deletes DelInstrs. Incrementally updates instruction
406 /// depths if requested.
407 ///
408 /// \param MBB basic block to insert instructions in
409 /// \param MI current machine instruction
410 /// \param InsInstrs new instructions to insert in \p MBB
/// \param DelInstrs instructions to delete from \p MBB
412 /// \param MinInstr is a pointer to the machine trace information
413 /// \param RegUnits set of live registers, needed to compute instruction depths
414 /// \param IncrementalUpdate if true, compute instruction depths incrementally,
415 ///                          otherwise invalidate the trace
416 static void insertDeleteInstructions(MachineBasicBlock *MBB, MachineInstr &MI,
                                     SmallVectorImpl<MachineInstr *> &InsInstrs,
                                     SmallVectorImpl<MachineInstr *> &DelInstrs,
419                                      MachineTraceMetrics::Ensemble *MinInstr,
420                                      SparseSet<LiveRegUnit> &RegUnits,
421                                      bool IncrementalUpdate) {
422   for (auto *InstrPtr : InsInstrs)
423     MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);
424 
425   for (auto *InstrPtr : DelInstrs) {
426     InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();
427     // Erase all LiveRegs defined by the removed instruction
428     for (auto I = RegUnits.begin(); I != RegUnits.end(); ) {
429       if (I->MI == InstrPtr)
430         I = RegUnits.erase(I);
431       else
432         I++;
433     }
434   }
435 
436   if (IncrementalUpdate)
437     for (auto *InstrPtr : InsInstrs)
438       MinInstr->updateDepth(MBB, *InstrPtr, RegUnits);
439   else
440     MinInstr->invalidate(MBB);
441 
442   NumInstCombined++;
443 }
444 
445 // Check that the difference between original and new latency is decreasing for
446 // later patterns. This helps to discover sub-optimal pattern orderings.
447 void MachineCombiner::verifyPatternOrder(
448     MachineBasicBlock *MBB, MachineInstr &Root,
449     SmallVector<MachineCombinerPattern, 16> &Patterns) {
450   long PrevLatencyDiff = std::numeric_limits<long>::max();
451   (void)PrevLatencyDiff; // Variable is used in assert only.
452   for (auto P : Patterns) {
453     SmallVector<MachineInstr *, 16> InsInstrs;
454     SmallVector<MachineInstr *, 16> DelInstrs;
455     DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
456     TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
457                                     InstrIdxForVirtReg);
458     // Found pattern, but did not generate alternative sequence.
459     // This can happen e.g. when an immediate could not be materialized
460     // in a single instruction.
461     if (InsInstrs.empty() || !TSchedModel.hasInstrSchedModelOrItineraries())
462       continue;
463 
464     unsigned NewRootLatency, RootLatency;
465     std::tie(NewRootLatency, RootLatency) = getLatenciesForInstrSequences(
466         Root, InsInstrs, DelInstrs, MinInstr->getTrace(MBB));
467     long CurrentLatencyDiff = ((long)RootLatency) - ((long)NewRootLatency);
468     assert(CurrentLatencyDiff <= PrevLatencyDiff &&
469            "Current pattern is better than previous pattern.");
470     PrevLatencyDiff = CurrentLatencyDiff;
471   }
472 }
473 
/// Substitute a slow code sequence with a faster one by
/// evaluating instruction combining patterns.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combines a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for code size, always combines when the
/// new sequence is shorter.
481 bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
482   bool Changed = false;
483   DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");
484 
485   bool IncrementalUpdate = false;
486   auto BlockIter = MBB->begin();
487   decltype(BlockIter) LastUpdate;
488   // Check if the block is in a loop.
489   const MachineLoop *ML = MLI->getLoopFor(MBB);
490   if (!MinInstr)
491     MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
492 
493   SparseSet<LiveRegUnit> RegUnits;
494   RegUnits.setUniverse(TRI->getNumRegUnits());
495 
496   while (BlockIter != MBB->end()) {
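    // Advance the iterator up front: MI may be erased during a successful
    // combine, which would otherwise invalidate BlockIter.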
497     auto &MI = *BlockIter++;
498 
499     DEBUG(dbgs() << "INSTR "; MI.dump(); dbgs() << "\n";);
500     SmallVector<MachineCombinerPattern, 16> Patterns;
501     // The motivating example is:
502     //
503     //     MUL  Other        MUL_op1 MUL_op2  Other
504     //      \    /               \      |    /
505     //      ADD/SUB      =>        MADD/MSUB
506     //      (=Root)                (=NewRoot)
507 
    // The DAGCombine code always replaced MUL + ADD/SUB by MADD. While this is
    // usually beneficial for code size, it can unfortunately hurt performance
    // when the ADD is on the critical path but the MUL is not. With the
    // substitution the MUL becomes part of the critical path (in the form of
    // the MADD) and can lengthen it on architectures where the MADD latency is
    // longer than the ADD latency.
514     //
515     // For each instruction we check if it can be the root of a combiner
516     // pattern. Then for each pattern the new code sequence in form of MI is
517     // generated and evaluated. When the efficiency criteria (don't lengthen
    // critical path, don't use more resources) are met, the new sequence gets
519     // hooked up into the basic block before the old sequence is removed.
520     //
521     // The algorithm does not try to evaluate all patterns and pick the best.
522     // This is only an artificial restriction though. In practice there is
523     // mostly one pattern, and getMachineCombinerPatterns() can order patterns
524     // based on an internal cost heuristic. If
525     // machine-combiner-verify-pattern-order is enabled, all patterns are
526     // checked to ensure later patterns do not provide better latency savings.
527 
528     if (!TII->getMachineCombinerPatterns(MI, Patterns))
529       continue;
530 
531     if (VerifyPatternOrder)
532       verifyPatternOrder(MBB, MI, Patterns);
533 
534     for (auto P : Patterns) {
535       SmallVector<MachineInstr *, 16> InsInstrs;
536       SmallVector<MachineInstr *, 16> DelInstrs;
537       DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
538       TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
539                                       InstrIdxForVirtReg);
540       unsigned NewInstCount = InsInstrs.size();
541       unsigned OldInstCount = DelInstrs.size();
542       // Found pattern, but did not generate alternative sequence.
543       // This can happen e.g. when an immediate could not be materialized
544       // in a single instruction.
545       if (!NewInstCount)
546         continue;
547 
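      // Inside loops, patterns that are known to improve throughput are
      // substituted unconditionally, bypassing the critical-path checks below.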
548       bool SubstituteAlways = false;
549       if (ML && TII->isThroughputPattern(P))
550         SubstituteAlways = true;
551 
552       if (IncrementalUpdate) {
553         // Update depths since the last incremental update.
554         MinInstr->updateDepths(LastUpdate, BlockIter, RegUnits);
555         LastUpdate = BlockIter;
556       }
557 
      // Substitute when we optimize for code size and the new sequence has
      // fewer instructions, OR when the new sequence neither lengthens the
      // critical path nor increases resource pressure.
562       if (SubstituteAlways || doSubstitute(NewInstCount, OldInstCount)) {
563         insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
564                                  RegUnits, IncrementalUpdate);
565         // Eagerly stop after the first pattern fires.
566         Changed = true;
567         break;
568       } else {
569         // For big basic blocks, we only compute the full trace the first time
570         // we hit this. We do not invalidate the trace, but instead update the
571         // instruction depths incrementally.
572         // NOTE: Only the instruction depths up to MI are accurate. All other
573         // trace information is not updated.
574         MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
575         Traces->verifyAnalysis();
576         if (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs, DelInstrs,
577                                     InstrIdxForVirtReg, P,
578                                     !IncrementalUpdate) &&
579             preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs)) {
580           if (MBB->size() > inc_threshold) {
            // Use incremental depth updates for basic blocks above the threshold.
582             IncrementalUpdate = true;
583             LastUpdate = BlockIter;
584           }
585 
586           insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
587                                    RegUnits, IncrementalUpdate);
588 
589           // Eagerly stop after the first pattern fires.
590           Changed = true;
591           break;
592         }
593         // Cleanup instructions of the alternative code sequence. There is no
594         // use for them.
595         MachineFunction *MF = MBB->getParent();
596         for (auto *InstrPtr : InsInstrs)
597           MF->DeleteMachineInstr(InstrPtr);
598       }
599       InstrIdxForVirtReg.clear();
600     }
601   }
602 
603   if (Changed && IncrementalUpdate)
604     Traces->invalidate(MBB);
605   return Changed;
606 }
607 
608 bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
609   const TargetSubtargetInfo &STI = MF.getSubtarget();
610   TII = STI.getInstrInfo();
611   TRI = STI.getRegisterInfo();
612   SchedModel = STI.getSchedModel();
613   TSchedModel.init(SchedModel, &STI, TII);
614   MRI = &MF.getRegInfo();
615   MLI = &getAnalysis<MachineLoopInfo>();
616   Traces = &getAnalysis<MachineTraceMetrics>();
617   MinInstr = nullptr;
618   OptSize = MF.getFunction().optForSize();
619 
620   DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
621   if (!TII->useMachineCombiner()) {
622     DEBUG(dbgs() << "  Skipping pass: Target does not support machine combiner\n");
623     return false;
624   }
625 
626   bool Changed = false;
627 
628   // Try to combine instructions.
629   for (auto &MBB : MF)
630     Changed |= combineInstructions(&MBB);
631 
632   return Changed;
633 }
634