1 //===-- BranchFolding.cpp - Fold machine code branch instructions ---------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass forwards branches to unconditional branches to make them branch
11 // directly to the target block.  This pass often results in dead MBB's, which
12 // it then removes.
13 //
14 // Note that this pass must be run after register allocation, it cannot handle
15 // SSA form. It also must handle virtual registers for targets that emit virtual
16 // ISA (e.g. NVPTX).
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "BranchFolding.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/CodeGen/Analysis.h"
25 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
26 #include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
27 #include "llvm/CodeGen/MachineFunctionPass.h"
28 #include "llvm/CodeGen/MachineJumpTableInfo.h"
29 #include "llvm/CodeGen/MachineMemOperand.h"
30 #include "llvm/CodeGen/MachineLoopInfo.h"
31 #include "llvm/CodeGen/MachineModuleInfo.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/Passes.h"
34 #include "llvm/CodeGen/TargetPassConfig.h"
35 #include "llvm/IR/DebugInfoMetadata.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/Target/TargetInstrInfo.h"
42 #include "llvm/Target/TargetRegisterInfo.h"
43 #include "llvm/Target/TargetSubtargetInfo.h"
44 #include <algorithm>
45 using namespace llvm;
46 
47 #define DEBUG_TYPE "branchfolding"
48 
49 STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
50 STATISTIC(NumBranchOpts, "Number of branches optimized");
51 STATISTIC(NumTailMerge , "Number of block tails merged");
52 STATISTIC(NumHoist     , "Number of times common instructions are hoisted");
53 STATISTIC(NumTailCalls,  "Number of tail calls optimized");
54 
55 static cl::opt<cl::boolOrDefault> FlagEnableTailMerge("enable-tail-merge",
56                               cl::init(cl::BOU_UNSET), cl::Hidden);
57 
58 // Throttle for huge numbers of predecessors (compile speed problems)
59 static cl::opt<unsigned>
60 TailMergeThreshold("tail-merge-threshold",
61           cl::desc("Max number of predecessors to consider tail merging"),
62           cl::init(150), cl::Hidden);
63 
64 // Heuristic for tail merging (and, inversely, tail duplication).
65 // TODO: This should be replaced with a target query.
66 static cl::opt<unsigned>
67 TailMergeSize("tail-merge-size",
68           cl::desc("Min number of instructions to consider tail merging"),
69                               cl::init(3), cl::Hidden);
70 
71 namespace {
72   /// BranchFolderPass - Wrap branch folder in a machine function pass.
73   class BranchFolderPass : public MachineFunctionPass {
74   public:
75     static char ID;
76     explicit BranchFolderPass(): MachineFunctionPass(ID) {}
77 
78     bool runOnMachineFunction(MachineFunction &MF) override;
79 
80     void getAnalysisUsage(AnalysisUsage &AU) const override {
81       AU.addRequired<MachineBlockFrequencyInfo>();
82       AU.addRequired<MachineBranchProbabilityInfo>();
83       AU.addRequired<TargetPassConfig>();
84       MachineFunctionPass::getAnalysisUsage(AU);
85     }
86   };
87 }
88 
89 char BranchFolderPass::ID = 0;
90 char &llvm::BranchFolderPassID = BranchFolderPass::ID;
91 
92 INITIALIZE_PASS(BranchFolderPass, "branch-folder",
93                 "Control Flow Optimizer", false, false)
94 
95 bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) {
96   if (skipFunction(*MF.getFunction()))
97     return false;
98 
99   TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
100   // TailMerge can create jump into if branches that make CFG irreducible for
101   // HW that requires structurized CFG.
102   bool EnableTailMerge = !MF.getTarget().requiresStructuredCFG() &&
103                          PassConfig->getEnableTailMerge();
104   BranchFolder::MBFIWrapper MBBFreqInfo(
105       getAnalysis<MachineBlockFrequencyInfo>());
106   BranchFolder Folder(EnableTailMerge, /*CommonHoist=*/true, MBBFreqInfo,
107                       getAnalysis<MachineBranchProbabilityInfo>());
108   return Folder.OptimizeFunction(MF, MF.getSubtarget().getInstrInfo(),
109                                  MF.getSubtarget().getRegisterInfo(),
110                                  getAnalysisIfAvailable<MachineModuleInfo>());
111 }
112 
113 BranchFolder::BranchFolder(bool defaultEnableTailMerge, bool CommonHoist,
114                            MBFIWrapper &FreqInfo,
115                            const MachineBranchProbabilityInfo &ProbInfo,
116                            unsigned MinTailLength)
117     : EnableHoistCommonCode(CommonHoist), MinCommonTailLength(MinTailLength),
118       MBBFreqInfo(FreqInfo), MBPI(ProbInfo) {
119   if (MinCommonTailLength == 0)
120     MinCommonTailLength = TailMergeSize;
121   switch (FlagEnableTailMerge) {
122   case cl::BOU_UNSET: EnableTailMerge = defaultEnableTailMerge; break;
123   case cl::BOU_TRUE: EnableTailMerge = true; break;
124   case cl::BOU_FALSE: EnableTailMerge = false; break;
125   }
126 }
127 
128 void BranchFolder::RemoveDeadBlock(MachineBasicBlock *MBB) {
129   assert(MBB->pred_empty() && "MBB must be dead!");
130   DEBUG(dbgs() << "\nRemoving MBB: " << *MBB);
131 
132   MachineFunction *MF = MBB->getParent();
133   // drop all successors.
134   while (!MBB->succ_empty())
135     MBB->removeSuccessor(MBB->succ_end()-1);
136 
137   // Avoid matching if this pointer gets reused.
138   TriedMerging.erase(MBB);
139 
140   // Remove the block.
141   MF->erase(MBB);
142   FuncletMembership.erase(MBB);
143   if (MLI)
144     MLI->removeBlock(MBB);
145 }
146 
147 bool BranchFolder::OptimizeFunction(MachineFunction &MF,
148                                     const TargetInstrInfo *tii,
149                                     const TargetRegisterInfo *tri,
150                                     MachineModuleInfo *mmi,
151                                     MachineLoopInfo *mli, bool AfterPlacement) {
152   if (!tii) return false;
153 
154   TriedMerging.clear();
155 
156   AfterBlockPlacement = AfterPlacement;
157   TII = tii;
158   TRI = tri;
159   MMI = mmi;
160   MLI = mli;
161 
162   MachineRegisterInfo &MRI = MF.getRegInfo();
163   UpdateLiveIns = MRI.tracksLiveness() && TRI->trackLivenessAfterRegAlloc(MF);
164   if (!UpdateLiveIns)
165     MRI.invalidateLiveness();
166 
167   // Fix CFG.  The later algorithms expect it to be right.
168   bool MadeChange = false;
169   for (MachineBasicBlock &MBB : MF) {
170     MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
171     SmallVector<MachineOperand, 4> Cond;
172     if (!TII->analyzeBranch(MBB, TBB, FBB, Cond, true))
173       MadeChange |= MBB.CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
174   }
175 
176   // Recalculate funclet membership.
177   FuncletMembership = getFuncletMembership(MF);
178 
179   bool MadeChangeThisIteration = true;
180   while (MadeChangeThisIteration) {
181     MadeChangeThisIteration    = TailMergeBlocks(MF);
182     // No need to clean up if tail merging does not change anything after the
183     // block placement.
184     if (!AfterBlockPlacement || MadeChangeThisIteration)
185       MadeChangeThisIteration |= OptimizeBranches(MF);
186     if (EnableHoistCommonCode)
187       MadeChangeThisIteration |= HoistCommonCode(MF);
188     MadeChange |= MadeChangeThisIteration;
189   }
190 
191   // See if any jump tables have become dead as the code generator
192   // did its thing.
193   MachineJumpTableInfo *JTI = MF.getJumpTableInfo();
194   if (!JTI)
195     return MadeChange;
196 
197   // Walk the function to find jump tables that are live.
198   BitVector JTIsLive(JTI->getJumpTables().size());
199   for (const MachineBasicBlock &BB : MF) {
200     for (const MachineInstr &I : BB)
201       for (const MachineOperand &Op : I.operands()) {
202         if (!Op.isJTI()) continue;
203 
204         // Remember that this JT is live.
205         JTIsLive.set(Op.getIndex());
206       }
207   }
208 
209   // Finally, remove dead jump tables.  This happens when the
210   // indirect jump was unreachable (and thus deleted).
211   for (unsigned i = 0, e = JTIsLive.size(); i != e; ++i)
212     if (!JTIsLive.test(i)) {
213       JTI->RemoveJumpTable(i);
214       MadeChange = true;
215     }
216 
217   return MadeChange;
218 }
219 
220 //===----------------------------------------------------------------------===//
221 //  Tail Merging of Blocks
222 //===----------------------------------------------------------------------===//
223 
224 /// HashMachineInstr - Compute a hash value for MI and its operands.
225 static unsigned HashMachineInstr(const MachineInstr &MI) {
226   unsigned Hash = MI.getOpcode();
227   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
228     const MachineOperand &Op = MI.getOperand(i);
229 
230     // Merge in bits from the operand if easy. We can't use MachineOperand's
231     // hash_code here because it's not deterministic and we sort by hash value
232     // later.
233     unsigned OperandHash = 0;
234     switch (Op.getType()) {
235     case MachineOperand::MO_Register:
236       OperandHash = Op.getReg();
237       break;
238     case MachineOperand::MO_Immediate:
239       OperandHash = Op.getImm();
240       break;
241     case MachineOperand::MO_MachineBasicBlock:
242       OperandHash = Op.getMBB()->getNumber();
243       break;
244     case MachineOperand::MO_FrameIndex:
245     case MachineOperand::MO_ConstantPoolIndex:
246     case MachineOperand::MO_JumpTableIndex:
247       OperandHash = Op.getIndex();
248       break;
249     case MachineOperand::MO_GlobalAddress:
250     case MachineOperand::MO_ExternalSymbol:
251       // Global address / external symbol are too hard, don't bother, but do
252       // pull in the offset.
253       OperandHash = Op.getOffset();
254       break;
255     default:
256       break;
257     }
258 
259     Hash += ((OperandHash << 3) | Op.getType()) << (i & 31);
260   }
261   return Hash;
262 }
263 
264 /// HashEndOfMBB - Hash the last instruction in the MBB.
265 static unsigned HashEndOfMBB(const MachineBasicBlock &MBB) {
266   MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr();
267   if (I == MBB.end())
268     return 0;
269 
270   return HashMachineInstr(*I);
271 }
272 
273 /// ComputeCommonTailLength - Given two machine basic blocks, compute the number
274 /// of instructions they actually have in common together at their end.  Return
275 /// iterators for the first shared instruction in each block.
276 static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1,
277                                         MachineBasicBlock *MBB2,
278                                         MachineBasicBlock::iterator &I1,
279                                         MachineBasicBlock::iterator &I2) {
280   I1 = MBB1->end();
281   I2 = MBB2->end();
282 
283   unsigned TailLen = 0;
284   while (I1 != MBB1->begin() && I2 != MBB2->begin()) {
285     --I1; --I2;
286     // Skip debugging pseudos; necessary to avoid changing the code.
287     while (I1->isDebugValue()) {
288       if (I1==MBB1->begin()) {
289         while (I2->isDebugValue()) {
290           if (I2==MBB2->begin())
291             // I1==DBG at begin; I2==DBG at begin
292             return TailLen;
293           --I2;
294         }
295         ++I2;
296         // I1==DBG at begin; I2==non-DBG, or first of DBGs not at begin
297         return TailLen;
298       }
299       --I1;
300     }
301     // I1==first (untested) non-DBG preceding known match
302     while (I2->isDebugValue()) {
303       if (I2==MBB2->begin()) {
304         ++I1;
305         // I1==non-DBG, or first of DBGs not at begin; I2==DBG at begin
306         return TailLen;
307       }
308       --I2;
309     }
310     // I1, I2==first (untested) non-DBGs preceding known match
311     if (!I1->isIdenticalTo(*I2) ||
312         // FIXME: This check is dubious. It's used to get around a problem where
313         // people incorrectly expect inline asm directives to remain in the same
314         // relative order. This is untenable because normal compiler
315         // optimizations (like this one) may reorder and/or merge these
316         // directives.
317         I1->isInlineAsm()) {
318       ++I1; ++I2;
319       break;
320     }
321     ++TailLen;
322   }
323   // Back past possible debugging pseudos at beginning of block.  This matters
324   // when one block differs from the other only by whether debugging pseudos
325   // are present at the beginning. (This way, the various checks later for
326   // I1==MBB1->begin() work as expected.)
327   if (I1 == MBB1->begin() && I2 != MBB2->begin()) {
328     --I2;
329     while (I2->isDebugValue()) {
330       if (I2 == MBB2->begin())
331         return TailLen;
332       --I2;
333     }
334     ++I2;
335   }
336   if (I2 == MBB2->begin() && I1 != MBB1->begin()) {
337     --I1;
338     while (I1->isDebugValue()) {
339       if (I1 == MBB1->begin())
340         return TailLen;
341       --I1;
342     }
343     ++I1;
344   }
345   return TailLen;
346 }
347 
348 void BranchFolder::ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
349                                            MachineBasicBlock *NewDest) {
350   TII->ReplaceTailWithBranchTo(OldInst, NewDest);
351 
352   if (UpdateLiveIns) {
353     NewDest->clearLiveIns();
354     computeLiveIns(LiveRegs, *TRI, *NewDest);
355   }
356 
357   ++NumTailMerge;
358 }
359 
360 MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
361                                             MachineBasicBlock::iterator BBI1,
362                                             const BasicBlock *BB) {
363   if (!TII->isLegalToSplitMBBAt(CurMBB, BBI1))
364     return nullptr;
365 
366   MachineFunction &MF = *CurMBB.getParent();
367 
368   // Create the fall-through block.
369   MachineFunction::iterator MBBI = CurMBB.getIterator();
370   MachineBasicBlock *NewMBB =MF.CreateMachineBasicBlock(BB);
371   CurMBB.getParent()->insert(++MBBI, NewMBB);
372 
373   // Move all the successors of this block to the specified block.
374   NewMBB->transferSuccessors(&CurMBB);
375 
376   // Add an edge from CurMBB to NewMBB for the fall-through.
377   CurMBB.addSuccessor(NewMBB);
378 
379   // Splice the code over.
380   NewMBB->splice(NewMBB->end(), &CurMBB, BBI1, CurMBB.end());
381 
382   // NewMBB belongs to the same loop as CurMBB.
383   if (MLI)
384     if (MachineLoop *ML = MLI->getLoopFor(&CurMBB))
385       ML->addBasicBlockToLoop(NewMBB, MLI->getBase());
386 
387   // NewMBB inherits CurMBB's block frequency.
388   MBBFreqInfo.setBlockFreq(NewMBB, MBBFreqInfo.getBlockFreq(&CurMBB));
389 
390   if (UpdateLiveIns)
391     computeLiveIns(LiveRegs, *TRI, *NewMBB);
392 
393   // Add the new block to the funclet.
394   const auto &FuncletI = FuncletMembership.find(&CurMBB);
395   if (FuncletI != FuncletMembership.end()) {
396     auto n = FuncletI->second;
397     FuncletMembership[NewMBB] = n;
398   }
399 
400   return NewMBB;
401 }
402 
403 /// EstimateRuntime - Make a rough estimate for how long it will take to run
404 /// the specified code.
405 static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
406                                 MachineBasicBlock::iterator E) {
407   unsigned Time = 0;
408   for (; I != E; ++I) {
409     if (I->isDebugValue())
410       continue;
411     if (I->isCall())
412       Time += 10;
413     else if (I->mayLoad() || I->mayStore())
414       Time += 2;
415     else
416       ++Time;
417   }
418   return Time;
419 }
420 
421 // CurMBB needs to add an unconditional branch to SuccMBB (we removed these
422 // branches temporarily for tail merging).  In the case where CurMBB ends
423 // with a conditional branch to the next block, optimize by reversing the
424 // test and conditionally branching to SuccMBB instead.
425 static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
426                     const TargetInstrInfo *TII) {
427   MachineFunction *MF = CurMBB->getParent();
428   MachineFunction::iterator I = std::next(MachineFunction::iterator(CurMBB));
429   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
430   SmallVector<MachineOperand, 4> Cond;
431   DebugLoc dl = CurMBB->findBranchDebugLoc();
432   if (I != MF->end() && !TII->analyzeBranch(*CurMBB, TBB, FBB, Cond, true)) {
433     MachineBasicBlock *NextBB = &*I;
434     if (TBB == NextBB && !Cond.empty() && !FBB) {
435       if (!TII->reverseBranchCondition(Cond)) {
436         TII->removeBranch(*CurMBB);
437         TII->insertBranch(*CurMBB, SuccBB, nullptr, Cond, dl);
438         return;
439       }
440     }
441   }
442   TII->insertBranch(*CurMBB, SuccBB, nullptr,
443                     SmallVector<MachineOperand, 0>(), dl);
444 }
445 
446 bool
447 BranchFolder::MergePotentialsElt::operator<(const MergePotentialsElt &o) const {
448   if (getHash() < o.getHash())
449     return true;
450   if (getHash() > o.getHash())
451     return false;
452   if (getBlock()->getNumber() < o.getBlock()->getNumber())
453     return true;
454   if (getBlock()->getNumber() > o.getBlock()->getNumber())
455     return false;
456   // _GLIBCXX_DEBUG checks strict weak ordering, which involves comparing
457   // an object with itself.
458 #ifndef _GLIBCXX_DEBUG
459   llvm_unreachable("Predecessor appears twice");
460 #else
461   return false;
462 #endif
463 }
464 
465 BlockFrequency
466 BranchFolder::MBFIWrapper::getBlockFreq(const MachineBasicBlock *MBB) const {
467   auto I = MergedBBFreq.find(MBB);
468 
469   if (I != MergedBBFreq.end())
470     return I->second;
471 
472   return MBFI.getBlockFreq(MBB);
473 }
474 
475 void BranchFolder::MBFIWrapper::setBlockFreq(const MachineBasicBlock *MBB,
476                                              BlockFrequency F) {
477   MergedBBFreq[MBB] = F;
478 }
479 
480 raw_ostream &
481 BranchFolder::MBFIWrapper::printBlockFreq(raw_ostream &OS,
482                                           const MachineBasicBlock *MBB) const {
483   return MBFI.printBlockFreq(OS, getBlockFreq(MBB));
484 }
485 
486 raw_ostream &
487 BranchFolder::MBFIWrapper::printBlockFreq(raw_ostream &OS,
488                                           const BlockFrequency Freq) const {
489   return MBFI.printBlockFreq(OS, Freq);
490 }
491 
492 void BranchFolder::MBFIWrapper::view(const Twine &Name, bool isSimple) {
493   MBFI.view(Name, isSimple);
494 }
495 
496 uint64_t
497 BranchFolder::MBFIWrapper::getEntryFreq() const {
498   return MBFI.getEntryFreq();
499 }
500 
501 /// CountTerminators - Count the number of terminators in the given
502 /// block and set I to the position of the first non-terminator, if there
503 /// is one, or MBB->end() otherwise.
504 static unsigned CountTerminators(MachineBasicBlock *MBB,
505                                  MachineBasicBlock::iterator &I) {
506   I = MBB->end();
507   unsigned NumTerms = 0;
508   for (;;) {
509     if (I == MBB->begin()) {
510       I = MBB->end();
511       break;
512     }
513     --I;
514     if (!I->isTerminator()) break;
515     ++NumTerms;
516   }
517   return NumTerms;
518 }
519 
520 /// A no successor, non-return block probably ends in unreachable and is cold.
521 /// Also consider a block that ends in an indirect branch to be a return block,
522 /// since many targets use plain indirect branches to return.
523 static bool blockEndsInUnreachable(const MachineBasicBlock *MBB) {
524   if (!MBB->succ_empty())
525     return false;
526   if (MBB->empty())
527     return true;
528   return !(MBB->back().isReturn() || MBB->back().isIndirectBranch());
529 }
530 
531 /// ProfitableToMerge - Check if two machine basic blocks have a common tail
532 /// and decide if it would be profitable to merge those tails.  Return the
533 /// length of the common tail and iterators to the first common instruction
534 /// in each block.
535 /// MBB1, MBB2      The blocks to check
536 /// MinCommonTailLength  Minimum size of tail block to be merged.
537 /// CommonTailLen   Out parameter to record the size of the shared tail between
538 ///                 MBB1 and MBB2
539 /// I1, I2          Iterator references that will be changed to point to the first
540 ///                 instruction in the common tail shared by MBB1,MBB2
541 /// SuccBB          A common successor of MBB1, MBB2 which are in a canonical form
542 ///                 relative to SuccBB
543 /// PredBB          The layout predecessor of SuccBB, if any.
544 /// FuncletMembership  map from block to funclet #.
545 /// AfterPlacement  True if we are merging blocks after layout. Stricter
546 ///                 thresholds apply to prevent undoing tail-duplication.
547 static bool
548 ProfitableToMerge(MachineBasicBlock *MBB1, MachineBasicBlock *MBB2,
549                   unsigned MinCommonTailLength, unsigned &CommonTailLen,
550                   MachineBasicBlock::iterator &I1,
551                   MachineBasicBlock::iterator &I2, MachineBasicBlock *SuccBB,
552                   MachineBasicBlock *PredBB,
553                   DenseMap<const MachineBasicBlock *, int> &FuncletMembership,
554                   bool AfterPlacement) {
555   // It is never profitable to tail-merge blocks from two different funclets.
556   if (!FuncletMembership.empty()) {
557     auto Funclet1 = FuncletMembership.find(MBB1);
558     assert(Funclet1 != FuncletMembership.end());
559     auto Funclet2 = FuncletMembership.find(MBB2);
560     assert(Funclet2 != FuncletMembership.end());
561     if (Funclet1->second != Funclet2->second)
562       return false;
563   }
564 
565   CommonTailLen = ComputeCommonTailLength(MBB1, MBB2, I1, I2);
566   if (CommonTailLen == 0)
567     return false;
568   DEBUG(dbgs() << "Common tail length of BB#" << MBB1->getNumber()
569                << " and BB#" << MBB2->getNumber() << " is " << CommonTailLen
570                << '\n');
571 
572   // It's almost always profitable to merge any number of non-terminator
573   // instructions with the block that falls through into the common successor.
574   // This is true only for a single successor. For multiple successors, we are
575   // trading a conditional branch for an unconditional one.
576   // TODO: Re-visit successor size for non-layout tail merging.
577   if ((MBB1 == PredBB || MBB2 == PredBB) &&
578       (!AfterPlacement || MBB1->succ_size() == 1)) {
579     MachineBasicBlock::iterator I;
580     unsigned NumTerms = CountTerminators(MBB1 == PredBB ? MBB2 : MBB1, I);
581     if (CommonTailLen > NumTerms)
582       return true;
583   }
584 
585   // If these are identical non-return blocks with no successors, merge them.
586   // Such blocks are typically cold calls to noreturn functions like abort, and
587   // are unlikely to become a fallthrough target after machine block placement.
588   // Tail merging these blocks is unlikely to create additional unconditional
589   // branches, and will reduce the size of this cold code.
590   if (I1 == MBB1->begin() && I2 == MBB2->begin() &&
591       blockEndsInUnreachable(MBB1) && blockEndsInUnreachable(MBB2))
592     return true;
593 
594   // If one of the blocks can be completely merged and happens to be in
595   // a position where the other could fall through into it, merge any number
596   // of instructions, because it can be done without a branch.
597   // TODO: If the blocks are not adjacent, move one of them so that they are?
598   if (MBB1->isLayoutSuccessor(MBB2) && I2 == MBB2->begin())
599     return true;
600   if (MBB2->isLayoutSuccessor(MBB1) && I1 == MBB1->begin())
601     return true;
602 
603   // If both blocks are identical and end in a branch, merge them unless they
604   // both have a fallthrough predecessor and successor.
605   // We can only do this after block placement because it depends on whether
606   // there are fallthroughs, and we don't know until after layout.
607   if (AfterPlacement && I1 == MBB1->begin() && I2 == MBB2->begin()) {
608     auto BothFallThrough = [](MachineBasicBlock *MBB) {
609       if (MBB->succ_size() != 0 && !MBB->canFallThrough())
610         return false;
611       MachineFunction::iterator I(MBB);
612       MachineFunction *MF = MBB->getParent();
613       return (MBB != &*MF->begin()) && std::prev(I)->canFallThrough();
614     };
615     if (!BothFallThrough(MBB1) || !BothFallThrough(MBB2))
616       return true;
617   }
618 
619   // If both blocks have an unconditional branch temporarily stripped out,
620   // count that as an additional common instruction for the following
621   // heuristics. This heuristic is only accurate for single-succ blocks, so to
622   // make sure that during layout merging and duplicating don't crash, we check
623   // for that when merging during layout.
624   unsigned EffectiveTailLen = CommonTailLen;
625   if (SuccBB && MBB1 != PredBB && MBB2 != PredBB &&
626       (MBB1->succ_size() == 1 || !AfterPlacement) &&
627       !MBB1->back().isBarrier() &&
628       !MBB2->back().isBarrier())
629     ++EffectiveTailLen;
630 
631   // Check if the common tail is long enough to be worthwhile.
632   if (EffectiveTailLen >= MinCommonTailLength)
633     return true;
634 
635   // If we are optimizing for code size, 2 instructions in common is enough if
636   // we don't have to split a block.  At worst we will be introducing 1 new
637   // branch instruction, which is likely to be smaller than the 2
638   // instructions that would be deleted in the merge.
639   MachineFunction *MF = MBB1->getParent();
640   return EffectiveTailLen >= 2 && MF->getFunction()->optForSize() &&
641          (I1 == MBB1->begin() || I2 == MBB2->begin());
642 }
643 
644 unsigned BranchFolder::ComputeSameTails(unsigned CurHash,
645                                         unsigned MinCommonTailLength,
646                                         MachineBasicBlock *SuccBB,
647                                         MachineBasicBlock *PredBB) {
648   unsigned maxCommonTailLength = 0U;
649   SameTails.clear();
650   MachineBasicBlock::iterator TrialBBI1, TrialBBI2;
651   MPIterator HighestMPIter = std::prev(MergePotentials.end());
652   for (MPIterator CurMPIter = std::prev(MergePotentials.end()),
653                   B = MergePotentials.begin();
654        CurMPIter != B && CurMPIter->getHash() == CurHash; --CurMPIter) {
655     for (MPIterator I = std::prev(CurMPIter); I->getHash() == CurHash; --I) {
656       unsigned CommonTailLen;
657       if (ProfitableToMerge(CurMPIter->getBlock(), I->getBlock(),
658                             MinCommonTailLength,
659                             CommonTailLen, TrialBBI1, TrialBBI2,
660                             SuccBB, PredBB,
661                             FuncletMembership,
662                             AfterBlockPlacement)) {
663         if (CommonTailLen > maxCommonTailLength) {
664           SameTails.clear();
665           maxCommonTailLength = CommonTailLen;
666           HighestMPIter = CurMPIter;
667           SameTails.push_back(SameTailElt(CurMPIter, TrialBBI1));
668         }
669         if (HighestMPIter == CurMPIter &&
670             CommonTailLen == maxCommonTailLength)
671           SameTails.push_back(SameTailElt(I, TrialBBI2));
672       }
673       if (I == B)
674         break;
675     }
676   }
677   return maxCommonTailLength;
678 }
679 
680 void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
681                                         MachineBasicBlock *SuccBB,
682                                         MachineBasicBlock *PredBB) {
683   MPIterator CurMPIter, B;
684   for (CurMPIter = std::prev(MergePotentials.end()),
685       B = MergePotentials.begin();
686        CurMPIter->getHash() == CurHash; --CurMPIter) {
687     // Put the unconditional branch back, if we need one.
688     MachineBasicBlock *CurMBB = CurMPIter->getBlock();
689     if (SuccBB && CurMBB != PredBB)
690       FixTail(CurMBB, SuccBB, TII);
691     if (CurMPIter == B)
692       break;
693   }
694   if (CurMPIter->getHash() != CurHash)
695     CurMPIter++;
696   MergePotentials.erase(CurMPIter, MergePotentials.end());
697 }
698 
699 bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
700                                              MachineBasicBlock *SuccBB,
701                                              unsigned maxCommonTailLength,
702                                              unsigned &commonTailIndex) {
703   commonTailIndex = 0;
704   unsigned TimeEstimate = ~0U;
705   for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
706     // Use PredBB if possible; that doesn't require a new branch.
707     if (SameTails[i].getBlock() == PredBB) {
708       commonTailIndex = i;
709       break;
710     }
711     // Otherwise, make a (fairly bogus) choice based on estimate of
712     // how long it will take the various blocks to execute.
713     unsigned t = EstimateRuntime(SameTails[i].getBlock()->begin(),
714                                  SameTails[i].getTailStartPos());
715     if (t <= TimeEstimate) {
716       TimeEstimate = t;
717       commonTailIndex = i;
718     }
719   }
720 
721   MachineBasicBlock::iterator BBI =
722     SameTails[commonTailIndex].getTailStartPos();
723   MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
724 
725   DEBUG(dbgs() << "\nSplitting BB#" << MBB->getNumber() << ", size "
726                << maxCommonTailLength);
727 
728   // If the split block unconditionally falls-thru to SuccBB, it will be
729   // merged. In control flow terms it should then take SuccBB's name. e.g. If
730   // SuccBB is an inner loop, the common tail is still part of the inner loop.
731   const BasicBlock *BB = (SuccBB && MBB->succ_size() == 1) ?
732     SuccBB->getBasicBlock() : MBB->getBasicBlock();
733   MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI, BB);
734   if (!newMBB) {
735     DEBUG(dbgs() << "... failed!");
736     return false;
737   }
738 
739   SameTails[commonTailIndex].setBlock(newMBB);
740   SameTails[commonTailIndex].setTailStartPos(newMBB->begin());
741 
742   // If we split PredBB, newMBB is the new predecessor.
743   if (PredBB == MBB)
744     PredBB = newMBB;
745 
746   return true;
747 }
748 
749 void BranchFolder::MergeCommonTailDebugLocs(unsigned commonTailIndex) {
750   MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
751 
752   std::vector<MachineBasicBlock::iterator> NextCommonInsts(SameTails.size());
753   for (unsigned int i = 0 ; i != SameTails.size() ; ++i) {
754     if (i != commonTailIndex)
755       NextCommonInsts[i] = SameTails[i].getTailStartPos();
756     else {
757       assert(SameTails[i].getTailStartPos() == MBB->begin() &&
758           "MBB is not a common tail only block");
759     }
760   }
761 
762   for (auto &MI : *MBB) {
763     if (MI.isDebugValue())
764       continue;
765     DebugLoc DL = MI.getDebugLoc();
766     for (unsigned int i = 0 ; i < NextCommonInsts.size() ; i++) {
767       if (i == commonTailIndex)
768         continue;
769 
770       auto &Pos = NextCommonInsts[i];
771       assert(Pos != SameTails[i].getBlock()->end() &&
772           "Reached BB end within common tail");
773       while (Pos->isDebugValue()) {
774         ++Pos;
775         assert(Pos != SameTails[i].getBlock()->end() &&
776             "Reached BB end within common tail");
777       }
778       assert(MI.isIdenticalTo(*Pos) && "Expected matching MIIs!");
779       DL = DILocation::getMergedLocation(DL, Pos->getDebugLoc());
780       NextCommonInsts[i] = ++Pos;
781     }
782     MI.setDebugLoc(DL);
783   }
784 }
785 
786 static void
787 mergeOperations(MachineBasicBlock::iterator MBBIStartPos,
788                 MachineBasicBlock &MBBCommon) {
789   MachineBasicBlock *MBB = MBBIStartPos->getParent();
790   // Note CommonTailLen does not necessarily matches the size of
791   // the common BB nor all its instructions because of debug
792   // instructions differences.
793   unsigned CommonTailLen = 0;
794   for (auto E = MBB->end(); MBBIStartPos != E; ++MBBIStartPos)
795     ++CommonTailLen;
796 
797   MachineBasicBlock::reverse_iterator MBBI = MBB->rbegin();
798   MachineBasicBlock::reverse_iterator MBBIE = MBB->rend();
799   MachineBasicBlock::reverse_iterator MBBICommon = MBBCommon.rbegin();
800   MachineBasicBlock::reverse_iterator MBBIECommon = MBBCommon.rend();
801 
802   while (CommonTailLen--) {
803     assert(MBBI != MBBIE && "Reached BB end within common tail length!");
804     (void)MBBIE;
805 
806     if (MBBI->isDebugValue()) {
807       ++MBBI;
808       continue;
809     }
810 
811     while ((MBBICommon != MBBIECommon) && MBBICommon->isDebugValue())
812       ++MBBICommon;
813 
814     assert(MBBICommon != MBBIECommon &&
815            "Reached BB end within common tail length!");
816     assert(MBBICommon->isIdenticalTo(*MBBI) && "Expected matching MIIs!");
817 
818     // Merge MMOs from memory operations in the common block.
819     if (MBBICommon->mayLoad() || MBBICommon->mayStore())
820       MBBICommon->setMemRefs(MBBICommon->mergeMemRefsWith(*MBBI));
821     // Drop undef flags if they aren't present in all merged instructions.
822     for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) {
823       MachineOperand &MO = MBBICommon->getOperand(I);
824       if (MO.isReg() && MO.isUndef()) {
825         const MachineOperand &OtherMO = MBBI->getOperand(I);
826         if (!OtherMO.isUndef())
827           MO.setIsUndef(false);
828       }
829     }
830 
831     ++MBBI;
832     ++MBBICommon;
833   }
834 }
835 
836 // See if any of the blocks in MergePotentials (which all have SuccBB as a
837 // successor, or all have no successor if it is null) can be tail-merged.
838 // If there is a successor, any blocks in MergePotentials that are not
839 // tail-merged and are not immediately before Succ must have an unconditional
840 // branch to Succ added (but the predecessor/successor lists need no
841 // adjustment). The lone predecessor of Succ that falls through into Succ,
842 // if any, is given in PredBB.
843 // MinCommonTailLength - Except for the special cases below, tail-merge if
844 // there are at least this many instructions in common.
845 bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
846                                       MachineBasicBlock *PredBB,
847                                       unsigned MinCommonTailLength) {
848   bool MadeChange = false;
849 
850   DEBUG(dbgs() << "\nTryTailMergeBlocks: ";
851         for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
852           dbgs() << "BB#" << MergePotentials[i].getBlock()->getNumber()
853                  << (i == e-1 ? "" : ", ");
854         dbgs() << "\n";
855         if (SuccBB) {
856           dbgs() << "  with successor BB#" << SuccBB->getNumber() << '\n';
857           if (PredBB)
858             dbgs() << "  which has fall-through from BB#"
859                    << PredBB->getNumber() << "\n";
860         }
861         dbgs() << "Looking for common tails of at least "
862                << MinCommonTailLength << " instruction"
863                << (MinCommonTailLength == 1 ? "" : "s") << '\n';
864        );
865 
866   // Sort by hash value so that blocks with identical end sequences sort
867   // together.
868   array_pod_sort(MergePotentials.begin(), MergePotentials.end());
869 
870   // Walk through equivalence sets looking for actual exact matches.
871   while (MergePotentials.size() > 1) {
872     unsigned CurHash = MergePotentials.back().getHash();
873 
874     // Build SameTails, identifying the set of blocks with this hash code
875     // and with the maximum number of instructions in common.
876     unsigned maxCommonTailLength = ComputeSameTails(CurHash,
877                                                     MinCommonTailLength,
878                                                     SuccBB, PredBB);
879 
880     // If we didn't find any pair that has at least MinCommonTailLength
881     // instructions in common, remove all blocks with this hash code and retry.
882     if (SameTails.empty()) {
883       RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
884       continue;
885     }
886 
887     // If one of the blocks is the entire common tail (and not the entry
888     // block, which we can't jump to), we can treat all blocks with this same
889     // tail at once.  Use PredBB if that is one of the possibilities, as that
890     // will not introduce any extra branches.
891     MachineBasicBlock *EntryBB =
892         &MergePotentials.front().getBlock()->getParent()->front();
893     unsigned commonTailIndex = SameTails.size();
894     // If there are two blocks, check to see if one can be made to fall through
895     // into the other.
896     if (SameTails.size() == 2 &&
897         SameTails[0].getBlock()->isLayoutSuccessor(SameTails[1].getBlock()) &&
898         SameTails[1].tailIsWholeBlock())
899       commonTailIndex = 1;
900     else if (SameTails.size() == 2 &&
901              SameTails[1].getBlock()->isLayoutSuccessor(
902                                                      SameTails[0].getBlock()) &&
903              SameTails[0].tailIsWholeBlock())
904       commonTailIndex = 0;
905     else {
906       // Otherwise just pick one, favoring the fall-through predecessor if
907       // there is one.
908       for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
909         MachineBasicBlock *MBB = SameTails[i].getBlock();
910         if (MBB == EntryBB && SameTails[i].tailIsWholeBlock())
911           continue;
912         if (MBB == PredBB) {
913           commonTailIndex = i;
914           break;
915         }
916         if (SameTails[i].tailIsWholeBlock())
917           commonTailIndex = i;
918       }
919     }
920 
921     if (commonTailIndex == SameTails.size() ||
922         (SameTails[commonTailIndex].getBlock() == PredBB &&
923          !SameTails[commonTailIndex].tailIsWholeBlock())) {
924       // None of the blocks consist entirely of the common tail.
925       // Split a block so that one does.
926       if (!CreateCommonTailOnlyBlock(PredBB, SuccBB,
927                                      maxCommonTailLength, commonTailIndex)) {
928         RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
929         continue;
930       }
931     }
932 
933     MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
934 
935     // Recompute common tail MBB's edge weights and block frequency.
936     setCommonTailEdgeWeights(*MBB);
937 
938     // Merge debug locations across identical instructions for common tail.
939     MergeCommonTailDebugLocs(commonTailIndex);
940 
941     // MBB is common tail.  Adjust all other BB's to jump to this one.
942     // Traversal must be forwards so erases work.
943     DEBUG(dbgs() << "\nUsing common tail in BB#" << MBB->getNumber()
944                  << " for ");
945     for (unsigned int i=0, e = SameTails.size(); i != e; ++i) {
946       if (commonTailIndex == i)
947         continue;
948       DEBUG(dbgs() << "BB#" << SameTails[i].getBlock()->getNumber()
949                    << (i == e-1 ? "" : ", "));
950       // Merge operations (MMOs, undef flags)
951       mergeOperations(SameTails[i].getTailStartPos(), *MBB);
952       // Hack the end off BB i, making it jump to BB commonTailIndex instead.
953       ReplaceTailWithBranchTo(SameTails[i].getTailStartPos(), MBB);
954       // BB i is no longer a predecessor of SuccBB; remove it from the worklist.
955       MergePotentials.erase(SameTails[i].getMPIter());
956     }
957     DEBUG(dbgs() << "\n");
958     // We leave commonTailIndex in the worklist in case there are other blocks
959     // that match it with a smaller number of instructions.
960     MadeChange = true;
961   }
962   return MadeChange;
963 }
964 
965 bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
966   bool MadeChange = false;
967   if (!EnableTailMerge) return MadeChange;
968 
969   // First find blocks with no successors.
970   // Block placement does not create new tail merging opportunities for these
971   // blocks.
972   if (!AfterBlockPlacement) {
973     MergePotentials.clear();
974     for (MachineBasicBlock &MBB : MF) {
975       if (MergePotentials.size() == TailMergeThreshold)
976         break;
977       if (!TriedMerging.count(&MBB) && MBB.succ_empty())
978         MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(MBB), &MBB));
979     }
980 
981     // If this is a large problem, avoid visiting the same basic blocks
982     // multiple times.
983     if (MergePotentials.size() == TailMergeThreshold)
984       for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
985         TriedMerging.insert(MergePotentials[i].getBlock());
986 
987     // See if we can do any tail merging on those.
988     if (MergePotentials.size() >= 2)
989       MadeChange |= TryTailMergeBlocks(nullptr, nullptr, MinCommonTailLength);
990   }
991 
992   // Look at blocks (IBB) with multiple predecessors (PBB).
993   // We change each predecessor to a canonical form, by
994   // (1) temporarily removing any unconditional branch from the predecessor
995   // to IBB, and
996   // (2) alter conditional branches so they branch to the other block
997   // not IBB; this may require adding back an unconditional branch to IBB
998   // later, where there wasn't one coming in.  E.g.
999   //   Bcc IBB
1000   //   fallthrough to QBB
1001   // here becomes
1002   //   Bncc QBB
1003   // with a conceptual B to IBB after that, which never actually exists.
1004   // With those changes, we see whether the predecessors' tails match,
1005   // and merge them if so.  We change things out of canonical form and
1006   // back to the way they were later in the process.  (OptimizeBranches
1007   // would undo some of this, but we can't use it, because we'd get into
1008   // a compile-time infinite loop repeatedly doing and undoing the same
1009   // transformations.)
1010 
1011   for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
1012        I != E; ++I) {
1013     if (I->pred_size() < 2) continue;
1014     SmallPtrSet<MachineBasicBlock *, 8> UniquePreds;
1015     MachineBasicBlock *IBB = &*I;
1016     MachineBasicBlock *PredBB = &*std::prev(I);
1017     MergePotentials.clear();
1018     MachineLoop *ML;
1019 
1020     // Bail if merging after placement and IBB is the loop header because
1021     // -- If merging predecessors that belong to the same loop as IBB, the
1022     // common tail of merged predecessors may become the loop top if block
1023     // placement is called again and the predecessors may branch to this common
1024     // tail and require more branches. This can be relaxed if
1025     // MachineBlockPlacement::findBestLoopTop is more flexible.
1026     // --If merging predecessors that do not belong to the same loop as IBB, the
1027     // loop info of IBB's loop and the other loops may be affected. Calling the
1028     // block placement again may make big change to the layout and eliminate the
1029     // reason to do tail merging here.
1030     if (AfterBlockPlacement && MLI) {
1031       ML = MLI->getLoopFor(IBB);
1032       if (ML && IBB == ML->getHeader())
1033         continue;
1034     }
1035 
1036     for (MachineBasicBlock *PBB : I->predecessors()) {
1037       if (MergePotentials.size() == TailMergeThreshold)
1038         break;
1039 
1040       if (TriedMerging.count(PBB))
1041         continue;
1042 
1043       // Skip blocks that loop to themselves, can't tail merge these.
1044       if (PBB == IBB)
1045         continue;
1046 
1047       // Visit each predecessor only once.
1048       if (!UniquePreds.insert(PBB).second)
1049         continue;
1050 
1051       // Skip blocks which may jump to a landing pad. Can't tail merge these.
1052       if (PBB->hasEHPadSuccessor())
1053         continue;
1054 
1055       // After block placement, only consider predecessors that belong to the
1056       // same loop as IBB.  The reason is the same as above when skipping loop
1057       // header.
1058       if (AfterBlockPlacement && MLI)
1059         if (ML != MLI->getLoopFor(PBB))
1060           continue;
1061 
1062       MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
1063       SmallVector<MachineOperand, 4> Cond;
1064       if (!TII->analyzeBranch(*PBB, TBB, FBB, Cond, true)) {
1065         // Failing case: IBB is the target of a cbr, and we cannot reverse the
1066         // branch.
1067         SmallVector<MachineOperand, 4> NewCond(Cond);
1068         if (!Cond.empty() && TBB == IBB) {
1069           if (TII->reverseBranchCondition(NewCond))
1070             continue;
1071           // This is the QBB case described above
1072           if (!FBB) {
1073             auto Next = ++PBB->getIterator();
1074             if (Next != MF.end())
1075               FBB = &*Next;
1076           }
1077         }
1078 
1079         // Failing case: the only way IBB can be reached from PBB is via
1080         // exception handling.  Happens for landing pads.  Would be nice to have
1081         // a bit in the edge so we didn't have to do all this.
1082         if (IBB->isEHPad()) {
1083           MachineFunction::iterator IP = ++PBB->getIterator();
1084           MachineBasicBlock *PredNextBB = nullptr;
1085           if (IP != MF.end())
1086             PredNextBB = &*IP;
1087           if (!TBB) {
1088             if (IBB != PredNextBB)      // fallthrough
1089               continue;
1090           } else if (FBB) {
1091             if (TBB != IBB && FBB != IBB)   // cbr then ubr
1092               continue;
1093           } else if (Cond.empty()) {
1094             if (TBB != IBB)               // ubr
1095               continue;
1096           } else {
1097             if (TBB != IBB && IBB != PredNextBB)  // cbr
1098               continue;
1099           }
1100         }
1101 
1102         // Remove the unconditional branch at the end, if any.
1103         if (TBB && (Cond.empty() || FBB)) {
1104           DebugLoc dl = PBB->findBranchDebugLoc();
1105           TII->removeBranch(*PBB);
1106           if (!Cond.empty())
1107             // reinsert conditional branch only, for now
1108             TII->insertBranch(*PBB, (TBB == IBB) ? FBB : TBB, nullptr,
1109                               NewCond, dl);
1110         }
1111 
1112         MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(*PBB), PBB));
1113       }
1114     }
1115 
1116     // If this is a large problem, avoid visiting the same basic blocks multiple
1117     // times.
1118     if (MergePotentials.size() == TailMergeThreshold)
1119       for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
1120         TriedMerging.insert(MergePotentials[i].getBlock());
1121 
1122     if (MergePotentials.size() >= 2)
1123       MadeChange |= TryTailMergeBlocks(IBB, PredBB, MinCommonTailLength);
1124 
1125     // Reinsert an unconditional branch if needed. The 1 below can occur as a
1126     // result of removing blocks in TryTailMergeBlocks.
1127     PredBB = &*std::prev(I); // this may have been changed in TryTailMergeBlocks
1128     if (MergePotentials.size() == 1 &&
1129         MergePotentials.begin()->getBlock() != PredBB)
1130       FixTail(MergePotentials.begin()->getBlock(), IBB, TII);
1131   }
1132 
1133   return MadeChange;
1134 }
1135 
1136 void BranchFolder::setCommonTailEdgeWeights(MachineBasicBlock &TailMBB) {
1137   SmallVector<BlockFrequency, 2> EdgeFreqLs(TailMBB.succ_size());
1138   BlockFrequency AccumulatedMBBFreq;
1139 
1140   // Aggregate edge frequency of successor edge j:
1141   //  edgeFreq(j) = sum (freq(bb) * edgeProb(bb, j)),
1142   //  where bb is a basic block that is in SameTails.
1143   for (const auto &Src : SameTails) {
1144     const MachineBasicBlock *SrcMBB = Src.getBlock();
1145     BlockFrequency BlockFreq = MBBFreqInfo.getBlockFreq(SrcMBB);
1146     AccumulatedMBBFreq += BlockFreq;
1147 
1148     // It is not necessary to recompute edge weights if TailBB has less than two
1149     // successors.
1150     if (TailMBB.succ_size() <= 1)
1151       continue;
1152 
1153     auto EdgeFreq = EdgeFreqLs.begin();
1154 
1155     for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end();
1156          SuccI != SuccE; ++SuccI, ++EdgeFreq)
1157       *EdgeFreq += BlockFreq * MBPI.getEdgeProbability(SrcMBB, *SuccI);
1158   }
1159 
1160   MBBFreqInfo.setBlockFreq(&TailMBB, AccumulatedMBBFreq);
1161 
1162   if (TailMBB.succ_size() <= 1)
1163     return;
1164 
1165   auto SumEdgeFreq =
1166       std::accumulate(EdgeFreqLs.begin(), EdgeFreqLs.end(), BlockFrequency(0))
1167           .getFrequency();
1168   auto EdgeFreq = EdgeFreqLs.begin();
1169 
1170   if (SumEdgeFreq > 0) {
1171     for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end();
1172          SuccI != SuccE; ++SuccI, ++EdgeFreq) {
1173       auto Prob = BranchProbability::getBranchProbability(
1174           EdgeFreq->getFrequency(), SumEdgeFreq);
1175       TailMBB.setSuccProbability(SuccI, Prob);
1176     }
1177   }
1178 }
1179 
1180 //===----------------------------------------------------------------------===//
1181 //  Branch Optimization
1182 //===----------------------------------------------------------------------===//
1183 
1184 bool BranchFolder::OptimizeBranches(MachineFunction &MF) {
1185   bool MadeChange = false;
1186 
1187   // Make sure blocks are numbered in order
1188   MF.RenumberBlocks();
1189   // Renumbering blocks alters funclet membership, recalculate it.
1190   FuncletMembership = getFuncletMembership(MF);
1191 
1192   for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
1193        I != E; ) {
1194     MachineBasicBlock *MBB = &*I++;
1195     MadeChange |= OptimizeBlock(MBB);
1196 
1197     // If it is dead, remove it.
1198     if (MBB->pred_empty()) {
1199       RemoveDeadBlock(MBB);
1200       MadeChange = true;
1201       ++NumDeadBlocks;
1202     }
1203   }
1204 
1205   return MadeChange;
1206 }
1207 
1208 // Blocks should be considered empty if they contain only debug info;
1209 // else the debug info would affect codegen.
1210 static bool IsEmptyBlock(MachineBasicBlock *MBB) {
1211   return MBB->getFirstNonDebugInstr() == MBB->end();
1212 }
1213 
1214 // Blocks with only debug info and branches should be considered the same
1215 // as blocks with only branches.
1216 static bool IsBranchOnlyBlock(MachineBasicBlock *MBB) {
1217   MachineBasicBlock::iterator I = MBB->getFirstNonDebugInstr();
1218   assert(I != MBB->end() && "empty block!");
1219   return I->isBranch();
1220 }
1221 
1222 /// IsBetterFallthrough - Return true if it would be clearly better to
1223 /// fall-through to MBB1 than to fall through into MBB2.  This has to return
1224 /// a strict ordering, returning true for both (MBB1,MBB2) and (MBB2,MBB1) will
1225 /// result in infinite loops.
1226 static bool IsBetterFallthrough(MachineBasicBlock *MBB1,
1227                                 MachineBasicBlock *MBB2) {
1228   // Right now, we use a simple heuristic.  If MBB2 ends with a call, and
1229   // MBB1 doesn't, we prefer to fall through into MBB1.  This allows us to
1230   // optimize branches that branch to either a return block or an assert block
1231   // into a fallthrough to the return.
1232   MachineBasicBlock::iterator MBB1I = MBB1->getLastNonDebugInstr();
1233   MachineBasicBlock::iterator MBB2I = MBB2->getLastNonDebugInstr();
1234   if (MBB1I == MBB1->end() || MBB2I == MBB2->end())
1235     return false;
1236 
1237   // If there is a clear successor ordering we make sure that one block
1238   // will fall through to the next
1239   if (MBB1->isSuccessor(MBB2)) return true;
1240   if (MBB2->isSuccessor(MBB1)) return false;
1241 
1242   return MBB2I->isCall() && !MBB1I->isCall();
1243 }
1244 
1245 /// getBranchDebugLoc - Find and return, if any, the DebugLoc of the branch
1246 /// instructions on the block.
1247 static DebugLoc getBranchDebugLoc(MachineBasicBlock &MBB) {
1248   MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
1249   if (I != MBB.end() && I->isBranch())
1250     return I->getDebugLoc();
1251   return DebugLoc();
1252 }
1253 
1254 bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) {
1255   bool MadeChange = false;
1256   MachineFunction &MF = *MBB->getParent();
1257 ReoptimizeBlock:
1258 
1259   MachineFunction::iterator FallThrough = MBB->getIterator();
1260   ++FallThrough;
1261 
1262   // Make sure MBB and FallThrough belong to the same funclet.
1263   bool SameFunclet = true;
1264   if (!FuncletMembership.empty() && FallThrough != MF.end()) {
1265     auto MBBFunclet = FuncletMembership.find(MBB);
1266     assert(MBBFunclet != FuncletMembership.end());
1267     auto FallThroughFunclet = FuncletMembership.find(&*FallThrough);
1268     assert(FallThroughFunclet != FuncletMembership.end());
1269     SameFunclet = MBBFunclet->second == FallThroughFunclet->second;
1270   }
1271 
1272   // If this block is empty, make everyone use its fall-through, not the block
1273   // explicitly.  Landing pads should not do this since the landing-pad table
1274   // points to this block.  Blocks with their addresses taken shouldn't be
1275   // optimized away.
1276   if (IsEmptyBlock(MBB) && !MBB->isEHPad() && !MBB->hasAddressTaken() &&
1277       SameFunclet) {
1278     // Dead block?  Leave for cleanup later.
1279     if (MBB->pred_empty()) return MadeChange;
1280 
1281     if (FallThrough == MF.end()) {
1282       // TODO: Simplify preds to not branch here if possible!
1283     } else if (FallThrough->isEHPad()) {
1284       // Don't rewrite to a landing pad fallthough.  That could lead to the case
1285       // where a BB jumps to more than one landing pad.
1286       // TODO: Is it ever worth rewriting predecessors which don't already
1287       // jump to a landing pad, and so can safely jump to the fallthrough?
1288     } else if (MBB->isSuccessor(&*FallThrough)) {
1289       // Rewrite all predecessors of the old block to go to the fallthrough
1290       // instead.
1291       while (!MBB->pred_empty()) {
1292         MachineBasicBlock *Pred = *(MBB->pred_end()-1);
1293         Pred->ReplaceUsesOfBlockWith(MBB, &*FallThrough);
1294       }
1295       // If MBB was the target of a jump table, update jump tables to go to the
1296       // fallthrough instead.
1297       if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo())
1298         MJTI->ReplaceMBBInJumpTables(MBB, &*FallThrough);
1299       MadeChange = true;
1300     }
1301     return MadeChange;
1302   }
1303 
1304   // Check to see if we can simplify the terminator of the block before this
1305   // one.
1306   MachineBasicBlock &PrevBB = *std::prev(MachineFunction::iterator(MBB));
1307 
1308   MachineBasicBlock *PriorTBB = nullptr, *PriorFBB = nullptr;
1309   SmallVector<MachineOperand, 4> PriorCond;
1310   bool PriorUnAnalyzable =
1311       TII->analyzeBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, true);
1312   if (!PriorUnAnalyzable) {
1313     // If the CFG for the prior block has extra edges, remove them.
1314     MadeChange |= PrevBB.CorrectExtraCFGEdges(PriorTBB, PriorFBB,
1315                                               !PriorCond.empty());
1316 
1317     // If the previous branch is conditional and both conditions go to the same
1318     // destination, remove the branch, replacing it with an unconditional one or
1319     // a fall-through.
1320     if (PriorTBB && PriorTBB == PriorFBB) {
1321       DebugLoc dl = getBranchDebugLoc(PrevBB);
1322       TII->removeBranch(PrevBB);
1323       PriorCond.clear();
1324       if (PriorTBB != MBB)
1325         TII->insertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
1326       MadeChange = true;
1327       ++NumBranchOpts;
1328       goto ReoptimizeBlock;
1329     }
1330 
1331     // If the previous block unconditionally falls through to this block and
1332     // this block has no other predecessors, move the contents of this block
1333     // into the prior block. This doesn't usually happen when SimplifyCFG
1334     // has been used, but it can happen if tail merging splits a fall-through
1335     // predecessor of a block.
1336     // This has to check PrevBB->succ_size() because EH edges are ignored by
1337     // AnalyzeBranch.
1338     if (PriorCond.empty() && !PriorTBB && MBB->pred_size() == 1 &&
1339         PrevBB.succ_size() == 1 &&
1340         !MBB->hasAddressTaken() && !MBB->isEHPad()) {
1341       DEBUG(dbgs() << "\nMerging into block: " << PrevBB
1342                    << "From MBB: " << *MBB);
1343       // Remove redundant DBG_VALUEs first.
1344       if (PrevBB.begin() != PrevBB.end()) {
1345         MachineBasicBlock::iterator PrevBBIter = PrevBB.end();
1346         --PrevBBIter;
1347         MachineBasicBlock::iterator MBBIter = MBB->begin();
1348         // Check if DBG_VALUE at the end of PrevBB is identical to the
1349         // DBG_VALUE at the beginning of MBB.
1350         while (PrevBBIter != PrevBB.begin() && MBBIter != MBB->end()
1351                && PrevBBIter->isDebugValue() && MBBIter->isDebugValue()) {
1352           if (!MBBIter->isIdenticalTo(*PrevBBIter))
1353             break;
1354           MachineInstr &DuplicateDbg = *MBBIter;
1355           ++MBBIter; -- PrevBBIter;
1356           DuplicateDbg.eraseFromParent();
1357         }
1358       }
1359       PrevBB.splice(PrevBB.end(), MBB, MBB->begin(), MBB->end());
1360       PrevBB.removeSuccessor(PrevBB.succ_begin());
1361       assert(PrevBB.succ_empty());
1362       PrevBB.transferSuccessors(MBB);
1363       MadeChange = true;
1364       return MadeChange;
1365     }
1366 
1367     // If the previous branch *only* branches to *this* block (conditional or
1368     // not) remove the branch.
1369     if (PriorTBB == MBB && !PriorFBB) {
1370       TII->removeBranch(PrevBB);
1371       MadeChange = true;
1372       ++NumBranchOpts;
1373       goto ReoptimizeBlock;
1374     }
1375 
1376     // If the prior block branches somewhere else on the condition and here if
1377     // the condition is false, remove the uncond second branch.
1378     if (PriorFBB == MBB) {
1379       DebugLoc dl = getBranchDebugLoc(PrevBB);
1380       TII->removeBranch(PrevBB);
1381       TII->insertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
1382       MadeChange = true;
1383       ++NumBranchOpts;
1384       goto ReoptimizeBlock;
1385     }
1386 
1387     // If the prior block branches here on true and somewhere else on false, and
1388     // if the branch condition is reversible, reverse the branch to create a
1389     // fall-through.
1390     if (PriorTBB == MBB) {
1391       SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
1392       if (!TII->reverseBranchCondition(NewPriorCond)) {
1393         DebugLoc dl = getBranchDebugLoc(PrevBB);
1394         TII->removeBranch(PrevBB);
1395         TII->insertBranch(PrevBB, PriorFBB, nullptr, NewPriorCond, dl);
1396         MadeChange = true;
1397         ++NumBranchOpts;
1398         goto ReoptimizeBlock;
1399       }
1400     }
1401 
1402     // If this block has no successors (e.g. it is a return block or ends with
1403     // a call to a no-return function like abort or __cxa_throw) and if the pred
1404     // falls through into this block, and if it would otherwise fall through
1405     // into the block after this, move this block to the end of the function.
1406     //
1407     // We consider it more likely that execution will stay in the function (e.g.
1408     // due to loops) than it is to exit it.  This asserts in loops etc, moving
1409     // the assert condition out of the loop body.
1410     if (MBB->succ_empty() && !PriorCond.empty() && !PriorFBB &&
1411         MachineFunction::iterator(PriorTBB) == FallThrough &&
1412         !MBB->canFallThrough()) {
1413       bool DoTransform = true;
1414 
1415       // We have to be careful that the succs of PredBB aren't both no-successor
1416       // blocks.  If neither have successors and if PredBB is the second from
1417       // last block in the function, we'd just keep swapping the two blocks for
1418       // last.  Only do the swap if one is clearly better to fall through than
1419       // the other.
1420       if (FallThrough == --MF.end() &&
1421           !IsBetterFallthrough(PriorTBB, MBB))
1422         DoTransform = false;
1423 
1424       if (DoTransform) {
1425         // Reverse the branch so we will fall through on the previous true cond.
1426         SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
1427         if (!TII->reverseBranchCondition(NewPriorCond)) {
1428           DEBUG(dbgs() << "\nMoving MBB: " << *MBB
1429                        << "To make fallthrough to: " << *PriorTBB << "\n");
1430 
1431           DebugLoc dl = getBranchDebugLoc(PrevBB);
1432           TII->removeBranch(PrevBB);
1433           TII->insertBranch(PrevBB, MBB, nullptr, NewPriorCond, dl);
1434 
1435           // Move this block to the end of the function.
1436           MBB->moveAfter(&MF.back());
1437           MadeChange = true;
1438           ++NumBranchOpts;
1439           return MadeChange;
1440         }
1441       }
1442     }
1443   }
1444 
1445   if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 &&
1446       MF.getFunction()->optForSize()) {
1447     // Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch
1448     // direction, thereby defeating careful block placement and regressing
1449     // performance. Therefore, only consider this for optsize functions.
1450     MachineInstr &TailCall = *MBB->getFirstNonDebugInstr();
1451     if (TII->isUnconditionalTailCall(TailCall)) {
1452       MachineBasicBlock *Pred = *MBB->pred_begin();
1453       MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
1454       SmallVector<MachineOperand, 4> PredCond;
1455       bool PredAnalyzable =
1456           !TII->analyzeBranch(*Pred, PredTBB, PredFBB, PredCond, true);
1457 
1458       if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB) {
1459         // The predecessor has a conditional branch to this block which consists
1460         // of only a tail call. Try to fold the tail call into the conditional
1461         // branch.
1462         if (TII->canMakeTailCallConditional(PredCond, TailCall)) {
1463           // TODO: It would be nice if analyzeBranch() could provide a pointer
1464           // to the branch insturction so replaceBranchWithTailCall() doesn't
1465           // have to search for it.
1466           TII->replaceBranchWithTailCall(*Pred, PredCond, TailCall);
1467           ++NumTailCalls;
1468           Pred->removeSuccessor(MBB);
1469           MadeChange = true;
1470           return MadeChange;
1471         }
1472       }
1473       // If the predecessor is falling through to this block, we could reverse
1474       // the branch condition and fold the tail call into that. However, after
1475       // that we might have to re-arrange the CFG to fall through to the other
1476       // block and there is a high risk of regressing code size rather than
1477       // improving it.
1478     }
1479   }
1480 
1481   // Analyze the branch in the current block.
1482   MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr;
1483   SmallVector<MachineOperand, 4> CurCond;
1484   bool CurUnAnalyzable =
1485       TII->analyzeBranch(*MBB, CurTBB, CurFBB, CurCond, true);
1486   if (!CurUnAnalyzable) {
1487     // If the CFG for the prior block has extra edges, remove them.
1488     MadeChange |= MBB->CorrectExtraCFGEdges(CurTBB, CurFBB, !CurCond.empty());
1489 
1490     // If this is a two-way branch, and the FBB branches to this block, reverse
1491     // the condition so the single-basic-block loop is faster.  Instead of:
1492     //    Loop: xxx; jcc Out; jmp Loop
1493     // we want:
1494     //    Loop: xxx; jncc Loop; jmp Out
1495     if (CurTBB && CurFBB && CurFBB == MBB && CurTBB != MBB) {
1496       SmallVector<MachineOperand, 4> NewCond(CurCond);
1497       if (!TII->reverseBranchCondition(NewCond)) {
1498         DebugLoc dl = getBranchDebugLoc(*MBB);
1499         TII->removeBranch(*MBB);
1500         TII->insertBranch(*MBB, CurFBB, CurTBB, NewCond, dl);
1501         MadeChange = true;
1502         ++NumBranchOpts;
1503         goto ReoptimizeBlock;
1504       }
1505     }
1506 
1507     // If this branch is the only thing in its block, see if we can forward
1508     // other blocks across it.
1509     if (CurTBB && CurCond.empty() && !CurFBB &&
1510         IsBranchOnlyBlock(MBB) && CurTBB != MBB &&
1511         !MBB->hasAddressTaken() && !MBB->isEHPad()) {
1512       DebugLoc dl = getBranchDebugLoc(*MBB);
1513       // This block may contain just an unconditional branch.  Because there can
1514       // be 'non-branch terminators' in the block, try removing the branch and
1515       // then seeing if the block is empty.
1516       TII->removeBranch(*MBB);
1517       // If the only things remaining in the block are debug info, remove these
1518       // as well, so this will behave the same as an empty block in non-debug
1519       // mode.
1520       if (IsEmptyBlock(MBB)) {
1521         // Make the block empty, losing the debug info (we could probably
1522         // improve this in some cases.)
1523         MBB->erase(MBB->begin(), MBB->end());
1524       }
1525       // If this block is just an unconditional branch to CurTBB, we can
1526       // usually completely eliminate the block.  The only case we cannot
1527       // completely eliminate the block is when the block before this one
1528       // falls through into MBB and we can't understand the prior block's branch
1529       // condition.
1530       if (MBB->empty()) {
1531         bool PredHasNoFallThrough = !PrevBB.canFallThrough();
1532         if (PredHasNoFallThrough || !PriorUnAnalyzable ||
1533             !PrevBB.isSuccessor(MBB)) {
1534           // If the prior block falls through into us, turn it into an
1535           // explicit branch to us to make updates simpler.
1536           if (!PredHasNoFallThrough && PrevBB.isSuccessor(MBB) &&
1537               PriorTBB != MBB && PriorFBB != MBB) {
1538             if (!PriorTBB) {
1539               assert(PriorCond.empty() && !PriorFBB &&
1540                      "Bad branch analysis");
1541               PriorTBB = MBB;
1542             } else {
1543               assert(!PriorFBB && "Machine CFG out of date!");
1544               PriorFBB = MBB;
1545             }
1546             DebugLoc pdl = getBranchDebugLoc(PrevBB);
1547             TII->removeBranch(PrevBB);
1548             TII->insertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, pdl);
1549           }
1550 
1551           // Iterate through all the predecessors, revectoring each in-turn.
1552           size_t PI = 0;
1553           bool DidChange = false;
1554           bool HasBranchToSelf = false;
1555           while(PI != MBB->pred_size()) {
1556             MachineBasicBlock *PMBB = *(MBB->pred_begin() + PI);
1557             if (PMBB == MBB) {
1558               // If this block has an uncond branch to itself, leave it.
1559               ++PI;
1560               HasBranchToSelf = true;
1561             } else {
1562               DidChange = true;
1563               PMBB->ReplaceUsesOfBlockWith(MBB, CurTBB);
1564               // If this change resulted in PMBB ending in a conditional
1565               // branch where both conditions go to the same destination,
1566               // change this to an unconditional branch (and fix the CFG).
1567               MachineBasicBlock *NewCurTBB = nullptr, *NewCurFBB = nullptr;
1568               SmallVector<MachineOperand, 4> NewCurCond;
1569               bool NewCurUnAnalyzable = TII->analyzeBranch(
1570                   *PMBB, NewCurTBB, NewCurFBB, NewCurCond, true);
1571               if (!NewCurUnAnalyzable && NewCurTBB && NewCurTBB == NewCurFBB) {
1572                 DebugLoc pdl = getBranchDebugLoc(*PMBB);
1573                 TII->removeBranch(*PMBB);
1574                 NewCurCond.clear();
1575                 TII->insertBranch(*PMBB, NewCurTBB, nullptr, NewCurCond, pdl);
1576                 MadeChange = true;
1577                 ++NumBranchOpts;
1578                 PMBB->CorrectExtraCFGEdges(NewCurTBB, nullptr, false);
1579               }
1580             }
1581           }
1582 
1583           // Change any jumptables to go to the new MBB.
1584           if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo())
1585             MJTI->ReplaceMBBInJumpTables(MBB, CurTBB);
1586           if (DidChange) {
1587             ++NumBranchOpts;
1588             MadeChange = true;
1589             if (!HasBranchToSelf) return MadeChange;
1590           }
1591         }
1592       }
1593 
1594       // Add the branch back if the block is more than just an uncond branch.
1595       TII->insertBranch(*MBB, CurTBB, nullptr, CurCond, dl);
1596     }
1597   }
1598 
1599   // If the prior block doesn't fall through into this block, and if this
1600   // block doesn't fall through into some other block, see if we can find a
1601   // place to move this block where a fall-through will happen.
1602   if (!PrevBB.canFallThrough()) {
1603 
1604     // Now we know that there was no fall-through into this block, check to
1605     // see if it has a fall-through into its successor.
1606     bool CurFallsThru = MBB->canFallThrough();
1607 
1608     if (!MBB->isEHPad()) {
1609       // Check all the predecessors of this block.  If one of them has no fall
1610       // throughs, move this block right after it.
1611       for (MachineBasicBlock *PredBB : MBB->predecessors()) {
1612         // Analyze the branch at the end of the pred.
1613         MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
1614         SmallVector<MachineOperand, 4> PredCond;
1615         if (PredBB != MBB && !PredBB->canFallThrough() &&
1616             !TII->analyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true) &&
1617             (!CurFallsThru || !CurTBB || !CurFBB) &&
1618             (!CurFallsThru || MBB->getNumber() >= PredBB->getNumber())) {
1619           // If the current block doesn't fall through, just move it.
1620           // If the current block can fall through and does not end with a
1621           // conditional branch, we need to append an unconditional jump to
1622           // the (current) next block.  To avoid a possible compile-time
1623           // infinite loop, move blocks only backward in this case.
1624           // Also, if there are already 2 branches here, we cannot add a third;
1625           // this means we have the case
1626           // Bcc next
1627           // B elsewhere
1628           // next:
1629           if (CurFallsThru) {
1630             MachineBasicBlock *NextBB = &*std::next(MBB->getIterator());
1631             CurCond.clear();
1632             TII->insertBranch(*MBB, NextBB, nullptr, CurCond, DebugLoc());
1633           }
1634           MBB->moveAfter(PredBB);
1635           MadeChange = true;
1636           goto ReoptimizeBlock;
1637         }
1638       }
1639     }
1640 
1641     if (!CurFallsThru) {
1642       // Check all successors to see if we can move this block before it.
1643       for (MachineBasicBlock *SuccBB : MBB->successors()) {
1644         // Analyze the branch at the end of the block before the succ.
1645         MachineFunction::iterator SuccPrev = --SuccBB->getIterator();
1646 
1647         // If this block doesn't already fall-through to that successor, and if
1648         // the succ doesn't already have a block that can fall through into it,
1649         // and if the successor isn't an EH destination, we can arrange for the
1650         // fallthrough to happen.
1651         if (SuccBB != MBB && &*SuccPrev != MBB &&
1652             !SuccPrev->canFallThrough() && !CurUnAnalyzable &&
1653             !SuccBB->isEHPad()) {
1654           MBB->moveBefore(SuccBB);
1655           MadeChange = true;
1656           goto ReoptimizeBlock;
1657         }
1658       }
1659 
1660       // Okay, there is no really great place to put this block.  If, however,
1661       // the block before this one would be a fall-through if this block were
1662       // removed, move this block to the end of the function. There is no real
1663       // advantage in "falling through" to an EH block, so we don't want to
1664       // perform this transformation for that case.
1665       //
1666       // Also, Windows EH introduced the possibility of an arbitrary number of
1667       // successors to a given block.  The analyzeBranch call does not consider
1668       // exception handling and so we can get in a state where a block
1669       // containing a call is followed by multiple EH blocks that would be
1670       // rotated infinitely at the end of the function if the transformation
1671       // below were performed for EH "FallThrough" blocks.  Therefore, even if
1672       // that appears not to be happening anymore, we should assume that it is
1673       // possible and not remove the "!FallThrough()->isEHPad" condition below.
1674       MachineBasicBlock *PrevTBB = nullptr, *PrevFBB = nullptr;
1675       SmallVector<MachineOperand, 4> PrevCond;
1676       if (FallThrough != MF.end() &&
1677           !FallThrough->isEHPad() &&
1678           !TII->analyzeBranch(PrevBB, PrevTBB, PrevFBB, PrevCond, true) &&
1679           PrevBB.isSuccessor(&*FallThrough)) {
1680         MBB->moveAfter(&MF.back());
1681         MadeChange = true;
1682         return MadeChange;
1683       }
1684     }
1685   }
1686 
1687   return MadeChange;
1688 }
1689 
1690 //===----------------------------------------------------------------------===//
1691 //  Hoist Common Code
1692 //===----------------------------------------------------------------------===//
1693 
1694 bool BranchFolder::HoistCommonCode(MachineFunction &MF) {
1695   bool MadeChange = false;
1696   for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ) {
1697     MachineBasicBlock *MBB = &*I++;
1698     MadeChange |= HoistCommonCodeInSuccs(MBB);
1699   }
1700 
1701   return MadeChange;
1702 }
1703 
1704 /// findFalseBlock - BB has a fallthrough. Find its 'false' successor given
1705 /// its 'true' successor.
1706 static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
1707                                          MachineBasicBlock *TrueBB) {
1708   for (MachineBasicBlock *SuccBB : BB->successors())
1709     if (SuccBB != TrueBB)
1710       return SuccBB;
1711   return nullptr;
1712 }
1713 
1714 template <class Container>
1715 static void addRegAndItsAliases(unsigned Reg, const TargetRegisterInfo *TRI,
1716                                 Container &Set) {
1717   if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1718     for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
1719       Set.insert(*AI);
1720   } else {
1721     Set.insert(Reg);
1722   }
1723 }
1724 
1725 /// findHoistingInsertPosAndDeps - Find the location to move common instructions
1726 /// in successors to. The location is usually just before the terminator,
1727 /// however if the terminator is a conditional branch and its previous
1728 /// instruction is the flag setting instruction, the previous instruction is
1729 /// the preferred location. This function also gathers uses and defs of the
1730 /// instructions from the insertion point to the end of the block. The data is
1731 /// used by HoistCommonCodeInSuccs to ensure safety.
1732 static
1733 MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
1734                                                   const TargetInstrInfo *TII,
1735                                                   const TargetRegisterInfo *TRI,
1736                                                   SmallSet<unsigned,4> &Uses,
1737                                                   SmallSet<unsigned,4> &Defs) {
1738   MachineBasicBlock::iterator Loc = MBB->getFirstTerminator();
1739   if (!TII->isUnpredicatedTerminator(*Loc))
1740     return MBB->end();
1741 
1742   for (const MachineOperand &MO : Loc->operands()) {
1743     if (!MO.isReg())
1744       continue;
1745     unsigned Reg = MO.getReg();
1746     if (!Reg)
1747       continue;
1748     if (MO.isUse()) {
1749       addRegAndItsAliases(Reg, TRI, Uses);
1750     } else {
1751       if (!MO.isDead())
1752         // Don't try to hoist code in the rare case the terminator defines a
1753         // register that is later used.
1754         return MBB->end();
1755 
1756       // If the terminator defines a register, make sure we don't hoist
1757       // the instruction whose def might be clobbered by the terminator.
1758       addRegAndItsAliases(Reg, TRI, Defs);
1759     }
1760   }
1761 
1762   if (Uses.empty())
1763     return Loc;
1764   if (Loc == MBB->begin())
1765     return MBB->end();
1766 
1767   // The terminator is probably a conditional branch, try not to separate the
1768   // branch from condition setting instruction.
1769   MachineBasicBlock::iterator PI =
1770     skipDebugInstructionsBackward(std::prev(Loc), MBB->begin());
1771 
1772   bool IsDef = false;
1773   for (const MachineOperand &MO : PI->operands()) {
1774     // If PI has a regmask operand, it is probably a call. Separate away.
1775     if (MO.isRegMask())
1776       return Loc;
1777     if (!MO.isReg() || MO.isUse())
1778       continue;
1779     unsigned Reg = MO.getReg();
1780     if (!Reg)
1781       continue;
1782     if (Uses.count(Reg)) {
1783       IsDef = true;
1784       break;
1785     }
1786   }
1787   if (!IsDef)
1788     // The condition setting instruction is not just before the conditional
1789     // branch.
1790     return Loc;
1791 
1792   // Be conservative, don't insert instruction above something that may have
1793   // side-effects. And since it's potentially bad to separate flag setting
1794   // instruction from the conditional branch, just abort the optimization
1795   // completely.
1796   // Also avoid moving code above predicated instruction since it's hard to
1797   // reason about register liveness with predicated instruction.
1798   bool DontMoveAcrossStore = true;
1799   if (!PI->isSafeToMove(nullptr, DontMoveAcrossStore) || TII->isPredicated(*PI))
1800     return MBB->end();
1801 
1802 
1803   // Find out what registers are live. Note this routine is ignoring other live
1804   // registers which are only used by instructions in successor blocks.
1805   for (const MachineOperand &MO : PI->operands()) {
1806     if (!MO.isReg())
1807       continue;
1808     unsigned Reg = MO.getReg();
1809     if (!Reg)
1810       continue;
1811     if (MO.isUse()) {
1812       addRegAndItsAliases(Reg, TRI, Uses);
1813     } else {
1814       if (Uses.erase(Reg)) {
1815         if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1816           for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
1817             Uses.erase(*SubRegs); // Use sub-registers to be conservative
1818         }
1819       }
1820       addRegAndItsAliases(Reg, TRI, Defs);
1821     }
1822   }
1823 
1824   return PI;
1825 }
1826 
1827 bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
1828   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
1829   SmallVector<MachineOperand, 4> Cond;
1830   if (TII->analyzeBranch(*MBB, TBB, FBB, Cond, true) || !TBB || Cond.empty())
1831     return false;
1832 
1833   if (!FBB) FBB = findFalseBlock(MBB, TBB);
1834   if (!FBB)
1835     // Malformed bcc? True and false blocks are the same?
1836     return false;
1837 
1838   // Restrict the optimization to cases where MBB is the only predecessor,
1839   // it is an obvious win.
1840   if (TBB->pred_size() > 1 || FBB->pred_size() > 1)
1841     return false;
1842 
1843   // Find a suitable position to hoist the common instructions to. Also figure
1844   // out which registers are used or defined by instructions from the insertion
1845   // point to the end of the block.
1846   SmallSet<unsigned, 4> Uses, Defs;
1847   MachineBasicBlock::iterator Loc =
1848     findHoistingInsertPosAndDeps(MBB, TII, TRI, Uses, Defs);
1849   if (Loc == MBB->end())
1850     return false;
1851 
1852   bool HasDups = false;
1853   SmallVector<unsigned, 4> LocalDefs;
1854   SmallSet<unsigned, 4> LocalDefsSet;
1855   MachineBasicBlock::iterator TIB = TBB->begin();
1856   MachineBasicBlock::iterator FIB = FBB->begin();
1857   MachineBasicBlock::iterator TIE = TBB->end();
1858   MachineBasicBlock::iterator FIE = FBB->end();
1859   while (TIB != TIE && FIB != FIE) {
1860     // Skip dbg_value instructions. These do not count.
1861     TIB = skipDebugInstructionsForward(TIB, TIE);
1862     FIB = skipDebugInstructionsForward(FIB, FIE);
1863     if (TIB == TIE || FIB == FIE)
1864       break;
1865 
1866     if (!TIB->isIdenticalTo(*FIB, MachineInstr::CheckKillDead))
1867       break;
1868 
1869     if (TII->isPredicated(*TIB))
1870       // Hard to reason about register liveness with predicated instruction.
1871       break;
1872 
1873     bool IsSafe = true;
1874     for (MachineOperand &MO : TIB->operands()) {
1875       // Don't attempt to hoist instructions with register masks.
1876       if (MO.isRegMask()) {
1877         IsSafe = false;
1878         break;
1879       }
1880       if (!MO.isReg())
1881         continue;
1882       unsigned Reg = MO.getReg();
1883       if (!Reg)
1884         continue;
1885       if (MO.isDef()) {
1886         if (Uses.count(Reg)) {
1887           // Avoid clobbering a register that's used by the instruction at
1888           // the point of insertion.
1889           IsSafe = false;
1890           break;
1891         }
1892 
1893         if (Defs.count(Reg) && !MO.isDead()) {
1894           // Don't hoist the instruction if the def would be clobber by the
1895           // instruction at the point insertion. FIXME: This is overly
1896           // conservative. It should be possible to hoist the instructions
1897           // in BB2 in the following example:
1898           // BB1:
1899           // r1, eflag = op1 r2, r3
1900           // brcc eflag
1901           //
1902           // BB2:
1903           // r1 = op2, ...
1904           //    = op3, r1<kill>
1905           IsSafe = false;
1906           break;
1907         }
1908       } else if (!LocalDefsSet.count(Reg)) {
1909         if (Defs.count(Reg)) {
1910           // Use is defined by the instruction at the point of insertion.
1911           IsSafe = false;
1912           break;
1913         }
1914 
1915         if (MO.isKill() && Uses.count(Reg))
1916           // Kills a register that's read by the instruction at the point of
1917           // insertion. Remove the kill marker.
1918           MO.setIsKill(false);
1919       }
1920     }
1921     if (!IsSafe)
1922       break;
1923 
1924     bool DontMoveAcrossStore = true;
1925     if (!TIB->isSafeToMove(nullptr, DontMoveAcrossStore))
1926       break;
1927 
1928     // Remove kills from LocalDefsSet, these registers had short live ranges.
1929     for (const MachineOperand &MO : TIB->operands()) {
1930       if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1931         continue;
1932       unsigned Reg = MO.getReg();
1933       if (!Reg || !LocalDefsSet.count(Reg))
1934         continue;
1935       if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1936         for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
1937           LocalDefsSet.erase(*AI);
1938       } else {
1939         LocalDefsSet.erase(Reg);
1940       }
1941     }
1942 
1943     // Track local defs so we can update liveins.
1944     for (const MachineOperand &MO : TIB->operands()) {
1945       if (!MO.isReg() || !MO.isDef() || MO.isDead())
1946         continue;
1947       unsigned Reg = MO.getReg();
1948       if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg))
1949         continue;
1950       LocalDefs.push_back(Reg);
1951       addRegAndItsAliases(Reg, TRI, LocalDefsSet);
1952     }
1953 
1954     HasDups = true;
1955     ++TIB;
1956     ++FIB;
1957   }
1958 
1959   if (!HasDups)
1960     return false;
1961 
1962   MBB->splice(Loc, TBB, TBB->begin(), TIB);
1963   FBB->erase(FBB->begin(), FIB);
1964 
1965   // Update livein's.
1966   bool AddedLiveIns = false;
1967   for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
1968     unsigned Def = LocalDefs[i];
1969     if (LocalDefsSet.count(Def)) {
1970       TBB->addLiveIn(Def);
1971       FBB->addLiveIn(Def);
1972       AddedLiveIns = true;
1973     }
1974   }
1975 
1976   if (AddedLiveIns) {
1977     TBB->sortUniqueLiveIns();
1978     FBB->sortUniqueLiveIns();
1979   }
1980 
1981   ++NumHoist;
1982   return true;
1983 }
1984