1 //===-- BranchFolding.cpp - Fold machine code branch instructions ---------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass forwards branches to unconditional branches to make them branch
11 // directly to the target block.  This pass often results in dead MBB's, which
12 // it then removes.
13 //
// Note that this pass must be run after register allocation; it cannot handle
15 // SSA form. It also must handle virtual registers for targets that emit virtual
16 // ISA (e.g. NVPTX).
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "BranchFolding.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/CodeGen/Analysis.h"
25 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
26 #include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
27 #include "llvm/CodeGen/MachineFunctionPass.h"
28 #include "llvm/CodeGen/MachineJumpTableInfo.h"
29 #include "llvm/CodeGen/MachineMemOperand.h"
30 #include "llvm/CodeGen/MachineLoopInfo.h"
31 #include "llvm/CodeGen/MachineModuleInfo.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/Passes.h"
34 #include "llvm/CodeGen/TargetPassConfig.h"
35 #include "llvm/IR/DebugInfoMetadata.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/Target/TargetInstrInfo.h"
42 #include "llvm/Target/TargetRegisterInfo.h"
43 #include "llvm/Target/TargetSubtargetInfo.h"
44 #include <algorithm>
45 using namespace llvm;
46 
47 #define DEBUG_TYPE "branchfolding"
48 
49 STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
50 STATISTIC(NumBranchOpts, "Number of branches optimized");
51 STATISTIC(NumTailMerge , "Number of block tails merged");
52 STATISTIC(NumHoist     , "Number of times common instructions are hoisted");
53 STATISTIC(NumTailCalls,  "Number of tail calls optimized");
54 
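// Tri-state override for tail merging: when the flag is left unset, the
// default passed to the BranchFolder constructor decides; otherwise the flag
// forces tail merging on or off.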
55 static cl::opt<cl::boolOrDefault> FlagEnableTailMerge("enable-tail-merge",
56                               cl::init(cl::BOU_UNSET), cl::Hidden);
57 
58 // Throttle for huge numbers of predecessors (compile speed problems)
59 static cl::opt<unsigned>
60 TailMergeThreshold("tail-merge-threshold",
61           cl::desc("Max number of predecessors to consider tail merging"),
62           cl::init(150), cl::Hidden);
63 
64 // Heuristic for tail merging (and, inversely, tail duplication).
65 // TODO: This should be replaced with a target query.
66 static cl::opt<unsigned>
67 TailMergeSize("tail-merge-size",
68           cl::desc("Min number of instructions to consider tail merging"),
69                               cl::init(3), cl::Hidden);
70 
71 namespace {
72   /// BranchFolderPass - Wrap branch folder in a machine function pass.
73   class BranchFolderPass : public MachineFunctionPass {
74   public:
75     static char ID;
76     explicit BranchFolderPass(): MachineFunctionPass(ID) {}
77 
78     bool runOnMachineFunction(MachineFunction &MF) override;
79 
80     void getAnalysisUsage(AnalysisUsage &AU) const override {
81       AU.addRequired<MachineBlockFrequencyInfo>();
82       AU.addRequired<MachineBranchProbabilityInfo>();
83       AU.addRequired<TargetPassConfig>();
84       MachineFunctionPass::getAnalysisUsage(AU);
85     }
86   };
87 }
88 
89 char BranchFolderPass::ID = 0;
90 char &llvm::BranchFolderPassID = BranchFolderPass::ID;
91 
92 INITIALIZE_PASS(BranchFolderPass, "branch-folder",
93                 "Control Flow Optimizer", false, false)
94 
95 bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) {
96   if (skipFunction(*MF.getFunction()))
97     return false;
98 
99   TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
  // Tail merging can create branches that jump into the middle of an if,
  // making the CFG irreducible, which is not acceptable for hardware that
  // requires a structured CFG.
102   bool EnableTailMerge = !MF.getTarget().requiresStructuredCFG() &&
103                          PassConfig->getEnableTailMerge();
104   BranchFolder::MBFIWrapper MBBFreqInfo(
105       getAnalysis<MachineBlockFrequencyInfo>());
106   BranchFolder Folder(EnableTailMerge, /*CommonHoist=*/true, MBBFreqInfo,
107                       getAnalysis<MachineBranchProbabilityInfo>());
108   return Folder.OptimizeFunction(MF, MF.getSubtarget().getInstrInfo(),
109                                  MF.getSubtarget().getRegisterInfo(),
110                                  getAnalysisIfAvailable<MachineModuleInfo>());
111 }
112 
113 BranchFolder::BranchFolder(bool defaultEnableTailMerge, bool CommonHoist,
114                            MBFIWrapper &FreqInfo,
115                            const MachineBranchProbabilityInfo &ProbInfo,
116                            unsigned MinTailLength)
117     : EnableHoistCommonCode(CommonHoist), MinCommonTailLength(MinTailLength),
118       MBBFreqInfo(FreqInfo), MBPI(ProbInfo) {
119   if (MinCommonTailLength == 0)
120     MinCommonTailLength = TailMergeSize;
121   switch (FlagEnableTailMerge) {
122   case cl::BOU_UNSET: EnableTailMerge = defaultEnableTailMerge; break;
123   case cl::BOU_TRUE: EnableTailMerge = true; break;
124   case cl::BOU_FALSE: EnableTailMerge = false; break;
125   }
126 }
127 
128 void BranchFolder::RemoveDeadBlock(MachineBasicBlock *MBB) {
129   assert(MBB->pred_empty() && "MBB must be dead!");
130   DEBUG(dbgs() << "\nRemoving MBB: " << *MBB);
131 
132   MachineFunction *MF = MBB->getParent();
  // Drop all successors.
134   while (!MBB->succ_empty())
135     MBB->removeSuccessor(MBB->succ_end()-1);
136 
137   // Avoid matching if this pointer gets reused.
138   TriedMerging.erase(MBB);
139 
140   // Remove the block.
141   MF->erase(MBB);
142   FuncletMembership.erase(MBB);
143   if (MLI)
144     MLI->removeBlock(MBB);
145 }
146 
147 bool BranchFolder::OptimizeFunction(MachineFunction &MF,
148                                     const TargetInstrInfo *tii,
149                                     const TargetRegisterInfo *tri,
150                                     MachineModuleInfo *mmi,
151                                     MachineLoopInfo *mli, bool AfterPlacement) {
152   if (!tii) return false;
153 
154   TriedMerging.clear();
155 
156   AfterBlockPlacement = AfterPlacement;
157   TII = tii;
158   TRI = tri;
159   MMI = mmi;
160   MLI = mli;
161 
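  // Live-in lists are only kept up to date when the register info still tracks
  // liveness and the target maintains it after register allocation; otherwise
  // invalidate liveness so nothing relies on stale live-in information.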
162   MachineRegisterInfo &MRI = MF.getRegInfo();
163   UpdateLiveIns = MRI.tracksLiveness() && TRI->trackLivenessAfterRegAlloc(MF);
164   if (!UpdateLiveIns)
165     MRI.invalidateLiveness();
166 
167   // Fix CFG.  The later algorithms expect it to be right.
168   bool MadeChange = false;
169   for (MachineBasicBlock &MBB : MF) {
170     MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
171     SmallVector<MachineOperand, 4> Cond;
172     if (!TII->analyzeBranch(MBB, TBB, FBB, Cond, true))
173       MadeChange |= MBB.CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
174   }
175 
176   // Recalculate funclet membership.
177   FuncletMembership = getFuncletMembership(MF);
178 
179   bool MadeChangeThisIteration = true;
180   while (MadeChangeThisIteration) {
    MadeChangeThisIteration = TailMergeBlocks(MF);
182     // No need to clean up if tail merging does not change anything after the
183     // block placement.
184     if (!AfterBlockPlacement || MadeChangeThisIteration)
185       MadeChangeThisIteration |= OptimizeBranches(MF);
186     if (EnableHoistCommonCode)
187       MadeChangeThisIteration |= HoistCommonCode(MF);
188     MadeChange |= MadeChangeThisIteration;
189   }
190 
  // See if any jump tables have become dead now that the branches have been
  // optimized and dead blocks removed.
193   MachineJumpTableInfo *JTI = MF.getJumpTableInfo();
194   if (!JTI)
195     return MadeChange;
196 
197   // Walk the function to find jump tables that are live.
198   BitVector JTIsLive(JTI->getJumpTables().size());
199   for (const MachineBasicBlock &BB : MF) {
200     for (const MachineInstr &I : BB)
201       for (const MachineOperand &Op : I.operands()) {
202         if (!Op.isJTI()) continue;
203 
204         // Remember that this JT is live.
205         JTIsLive.set(Op.getIndex());
206       }
207   }
208 
209   // Finally, remove dead jump tables.  This happens when the
210   // indirect jump was unreachable (and thus deleted).
211   for (unsigned i = 0, e = JTIsLive.size(); i != e; ++i)
212     if (!JTIsLive.test(i)) {
213       JTI->RemoveJumpTable(i);
214       MadeChange = true;
215     }
216 
217   return MadeChange;
218 }
219 
220 //===----------------------------------------------------------------------===//
221 //  Tail Merging of Blocks
222 //===----------------------------------------------------------------------===//
223 
224 /// HashMachineInstr - Compute a hash value for MI and its operands.
225 static unsigned HashMachineInstr(const MachineInstr &MI) {
226   unsigned Hash = MI.getOpcode();
227   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
228     const MachineOperand &Op = MI.getOperand(i);
229 
230     // Merge in bits from the operand if easy. We can't use MachineOperand's
231     // hash_code here because it's not deterministic and we sort by hash value
232     // later.
233     unsigned OperandHash = 0;
234     switch (Op.getType()) {
235     case MachineOperand::MO_Register:
236       OperandHash = Op.getReg();
237       break;
238     case MachineOperand::MO_Immediate:
239       OperandHash = Op.getImm();
240       break;
241     case MachineOperand::MO_MachineBasicBlock:
242       OperandHash = Op.getMBB()->getNumber();
243       break;
244     case MachineOperand::MO_FrameIndex:
245     case MachineOperand::MO_ConstantPoolIndex:
246     case MachineOperand::MO_JumpTableIndex:
247       OperandHash = Op.getIndex();
248       break;
249     case MachineOperand::MO_GlobalAddress:
250     case MachineOperand::MO_ExternalSymbol:
251       // Global address / external symbol are too hard, don't bother, but do
252       // pull in the offset.
253       OperandHash = Op.getOffset();
254       break;
255     default:
256       break;
257     }
258 
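    // Fold the operand's hash and kind into the running value; shifting by the
    // operand index (mod 32) keeps the result sensitive to operand order.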
259     Hash += ((OperandHash << 3) | Op.getType()) << (i & 31);
260   }
261   return Hash;
262 }
263 
264 /// HashEndOfMBB - Hash the last instruction in the MBB.
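/// Blocks with a non-empty common tail necessarily agree on their final
/// non-debug instruction, so hashing only that instruction is enough to group
/// merge candidates.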
265 static unsigned HashEndOfMBB(const MachineBasicBlock &MBB) {
266   MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr();
267   if (I == MBB.end())
268     return 0;
269 
270   return HashMachineInstr(*I);
271 }
272 
273 /// ComputeCommonTailLength - Given two machine basic blocks, compute the number
274 /// of instructions they actually have in common together at their end.  Return
275 /// iterators for the first shared instruction in each block.
276 static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1,
277                                         MachineBasicBlock *MBB2,
278                                         MachineBasicBlock::iterator &I1,
279                                         MachineBasicBlock::iterator &I2) {
280   I1 = MBB1->end();
281   I2 = MBB2->end();
282 
283   unsigned TailLen = 0;
284   while (I1 != MBB1->begin() && I2 != MBB2->begin()) {
285     --I1; --I2;
286     // Skip debugging pseudos; necessary to avoid changing the code.
287     while (I1->isDebugValue()) {
288       if (I1==MBB1->begin()) {
289         while (I2->isDebugValue()) {
290           if (I2==MBB2->begin())
291             // I1==DBG at begin; I2==DBG at begin
292             return TailLen;
293           --I2;
294         }
295         ++I2;
296         // I1==DBG at begin; I2==non-DBG, or first of DBGs not at begin
297         return TailLen;
298       }
299       --I1;
300     }
301     // I1==first (untested) non-DBG preceding known match
302     while (I2->isDebugValue()) {
303       if (I2==MBB2->begin()) {
304         ++I1;
305         // I1==non-DBG, or first of DBGs not at begin; I2==DBG at begin
306         return TailLen;
307       }
308       --I2;
309     }
310     // I1, I2==first (untested) non-DBGs preceding known match
311     if (!I1->isIdenticalTo(*I2) ||
312         // FIXME: This check is dubious. It's used to get around a problem where
313         // people incorrectly expect inline asm directives to remain in the same
314         // relative order. This is untenable because normal compiler
315         // optimizations (like this one) may reorder and/or merge these
316         // directives.
317         I1->isInlineAsm()) {
318       ++I1; ++I2;
319       break;
320     }
321     ++TailLen;
322   }
323   // Back past possible debugging pseudos at beginning of block.  This matters
324   // when one block differs from the other only by whether debugging pseudos
325   // are present at the beginning. (This way, the various checks later for
326   // I1==MBB1->begin() work as expected.)
327   if (I1 == MBB1->begin() && I2 != MBB2->begin()) {
328     --I2;
329     while (I2->isDebugValue()) {
330       if (I2 == MBB2->begin())
331         return TailLen;
332       --I2;
333     }
334     ++I2;
335   }
336   if (I2 == MBB2->begin() && I1 != MBB1->begin()) {
337     --I1;
338     while (I1->isDebugValue()) {
339       if (I1 == MBB1->begin())
340         return TailLen;
341       --I1;
342     }
343     ++I1;
344   }
345   return TailLen;
346 }
347 
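/// ReplaceTailWithBranchTo - Replace the tail of the block, starting at
/// OldInst, with an unconditional branch to NewDest, and recompute NewDest's
/// live-ins if liveness is being tracked.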
348 void BranchFolder::ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
349                                            MachineBasicBlock *NewDest) {
350   TII->ReplaceTailWithBranchTo(OldInst, NewDest);
351 
352   if (UpdateLiveIns) {
353     NewDest->clearLiveIns();
354     computeLiveIns(LiveRegs, *TRI, *NewDest);
355   }
356 
357   ++NumTailMerge;
358 }
359 
360 MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
361                                             MachineBasicBlock::iterator BBI1,
362                                             const BasicBlock *BB) {
363   if (!TII->isLegalToSplitMBBAt(CurMBB, BBI1))
364     return nullptr;
365 
366   MachineFunction &MF = *CurMBB.getParent();
367 
368   // Create the fall-through block.
369   MachineFunction::iterator MBBI = CurMBB.getIterator();
  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(BB);
371   CurMBB.getParent()->insert(++MBBI, NewMBB);
372 
373   // Move all the successors of this block to the specified block.
374   NewMBB->transferSuccessors(&CurMBB);
375 
376   // Add an edge from CurMBB to NewMBB for the fall-through.
377   CurMBB.addSuccessor(NewMBB);
378 
379   // Splice the code over.
380   NewMBB->splice(NewMBB->end(), &CurMBB, BBI1, CurMBB.end());
381 
382   // NewMBB belongs to the same loop as CurMBB.
383   if (MLI)
384     if (MachineLoop *ML = MLI->getLoopFor(&CurMBB))
385       ML->addBasicBlockToLoop(NewMBB, MLI->getBase());
386 
387   // NewMBB inherits CurMBB's block frequency.
388   MBBFreqInfo.setBlockFreq(NewMBB, MBBFreqInfo.getBlockFreq(&CurMBB));
389 
390   if (UpdateLiveIns)
391     computeLiveIns(LiveRegs, *TRI, *NewMBB);
392 
393   // Add the new block to the funclet.
394   const auto &FuncletI = FuncletMembership.find(&CurMBB);
395   if (FuncletI != FuncletMembership.end()) {
396     auto n = FuncletI->second;
397     FuncletMembership[NewMBB] = n;
398   }
399 
400   return NewMBB;
401 }
402 
403 /// EstimateRuntime - Make a rough estimate for how long it will take to run
404 /// the specified code.
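/// Calls are weighted most heavily, memory operations less so, every other
/// instruction counts as one unit, and debug values are free.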
405 static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
406                                 MachineBasicBlock::iterator E) {
407   unsigned Time = 0;
408   for (; I != E; ++I) {
409     if (I->isDebugValue())
410       continue;
411     if (I->isCall())
412       Time += 10;
413     else if (I->mayLoad() || I->mayStore())
414       Time += 2;
415     else
416       ++Time;
417   }
418   return Time;
419 }
420 
421 // CurMBB needs to add an unconditional branch to SuccMBB (we removed these
422 // branches temporarily for tail merging).  In the case where CurMBB ends
423 // with a conditional branch to the next block, optimize by reversing the
424 // test and conditionally branching to SuccMBB instead.
425 static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
426                     const TargetInstrInfo *TII) {
427   MachineFunction *MF = CurMBB->getParent();
428   MachineFunction::iterator I = std::next(MachineFunction::iterator(CurMBB));
429   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
430   SmallVector<MachineOperand, 4> Cond;
431   DebugLoc dl = CurMBB->findBranchDebugLoc();
432   if (I != MF->end() && !TII->analyzeBranch(*CurMBB, TBB, FBB, Cond, true)) {
433     MachineBasicBlock *NextBB = &*I;
434     if (TBB == NextBB && !Cond.empty() && !FBB) {
435       if (!TII->reverseBranchCondition(Cond)) {
436         TII->removeBranch(*CurMBB);
437         TII->insertBranch(*CurMBB, SuccBB, nullptr, Cond, dl);
438         return;
439       }
440     }
441   }
442   TII->insertBranch(*CurMBB, SuccBB, nullptr,
443                     SmallVector<MachineOperand, 0>(), dl);
444 }
445 
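// Order merge candidates by hash value and then by block number so that
// sorting MergePotentials is deterministic and blocks with identical end
// sequences end up adjacent.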
446 bool
447 BranchFolder::MergePotentialsElt::operator<(const MergePotentialsElt &o) const {
448   if (getHash() < o.getHash())
449     return true;
450   if (getHash() > o.getHash())
451     return false;
452   if (getBlock()->getNumber() < o.getBlock()->getNumber())
453     return true;
454   if (getBlock()->getNumber() > o.getBlock()->getNumber())
455     return false;
456   // _GLIBCXX_DEBUG checks strict weak ordering, which involves comparing
457   // an object with itself.
458 #ifndef _GLIBCXX_DEBUG
459   llvm_unreachable("Predecessor appears twice");
460 #else
461   return false;
462 #endif
463 }
464 
465 BlockFrequency
466 BranchFolder::MBFIWrapper::getBlockFreq(const MachineBasicBlock *MBB) const {
467   auto I = MergedBBFreq.find(MBB);
468 
469   if (I != MergedBBFreq.end())
470     return I->second;
471 
472   return MBFI.getBlockFreq(MBB);
473 }
474 
475 void BranchFolder::MBFIWrapper::setBlockFreq(const MachineBasicBlock *MBB,
476                                              BlockFrequency F) {
477   MergedBBFreq[MBB] = F;
478 }
479 
480 raw_ostream &
481 BranchFolder::MBFIWrapper::printBlockFreq(raw_ostream &OS,
482                                           const MachineBasicBlock *MBB) const {
483   return MBFI.printBlockFreq(OS, getBlockFreq(MBB));
484 }
485 
486 raw_ostream &
487 BranchFolder::MBFIWrapper::printBlockFreq(raw_ostream &OS,
488                                           const BlockFrequency Freq) const {
489   return MBFI.printBlockFreq(OS, Freq);
490 }
491 
492 void BranchFolder::MBFIWrapper::view(const Twine &Name, bool isSimple) {
493   MBFI.view(Name, isSimple);
494 }
495 
496 uint64_t
497 BranchFolder::MBFIWrapper::getEntryFreq() const {
498   return MBFI.getEntryFreq();
499 }
500 
501 /// CountTerminators - Count the number of terminators in the given
502 /// block and set I to the position of the first non-terminator, if there
503 /// is one, or MBB->end() otherwise.
504 static unsigned CountTerminators(MachineBasicBlock *MBB,
505                                  MachineBasicBlock::iterator &I) {
506   I = MBB->end();
507   unsigned NumTerms = 0;
508   for (;;) {
509     if (I == MBB->begin()) {
510       I = MBB->end();
511       break;
512     }
513     --I;
514     if (!I->isTerminator()) break;
515     ++NumTerms;
516   }
517   return NumTerms;
518 }
519 
/// A no-successor, non-return block probably ends in unreachable and is cold.
521 /// Also consider a block that ends in an indirect branch to be a return block,
522 /// since many targets use plain indirect branches to return.
523 static bool blockEndsInUnreachable(const MachineBasicBlock *MBB) {
524   if (!MBB->succ_empty())
525     return false;
526   if (MBB->empty())
527     return true;
528   return !(MBB->back().isReturn() || MBB->back().isIndirectBranch());
529 }
530 
531 /// ProfitableToMerge - Check if two machine basic blocks have a common tail
532 /// and decide if it would be profitable to merge those tails.  Return the
533 /// length of the common tail and iterators to the first common instruction
534 /// in each block.
535 /// MBB1, MBB2      The blocks to check
536 /// MinCommonTailLength  Minimum size of tail block to be merged.
537 /// CommonTailLen   Out parameter to record the size of the shared tail between
538 ///                 MBB1 and MBB2
539 /// I1, I2          Iterator references that will be changed to point to the first
540 ///                 instruction in the common tail shared by MBB1,MBB2
541 /// SuccBB          A common successor of MBB1, MBB2 which are in a canonical form
542 ///                 relative to SuccBB
543 /// PredBB          The layout predecessor of SuccBB, if any.
544 /// FuncletMembership  map from block to funclet #.
545 /// AfterPlacement  True if we are merging blocks after layout. Stricter
546 ///                 thresholds apply to prevent undoing tail-duplication.
547 static bool
548 ProfitableToMerge(MachineBasicBlock *MBB1, MachineBasicBlock *MBB2,
549                   unsigned MinCommonTailLength, unsigned &CommonTailLen,
550                   MachineBasicBlock::iterator &I1,
551                   MachineBasicBlock::iterator &I2, MachineBasicBlock *SuccBB,
552                   MachineBasicBlock *PredBB,
553                   DenseMap<const MachineBasicBlock *, int> &FuncletMembership,
554                   bool AfterPlacement) {
555   // It is never profitable to tail-merge blocks from two different funclets.
556   if (!FuncletMembership.empty()) {
557     auto Funclet1 = FuncletMembership.find(MBB1);
558     assert(Funclet1 != FuncletMembership.end());
559     auto Funclet2 = FuncletMembership.find(MBB2);
560     assert(Funclet2 != FuncletMembership.end());
561     if (Funclet1->second != Funclet2->second)
562       return false;
563   }
564 
565   CommonTailLen = ComputeCommonTailLength(MBB1, MBB2, I1, I2);
566   if (CommonTailLen == 0)
567     return false;
568   DEBUG(dbgs() << "Common tail length of BB#" << MBB1->getNumber()
569                << " and BB#" << MBB2->getNumber() << " is " << CommonTailLen
570                << '\n');
571 
572   // It's almost always profitable to merge any number of non-terminator
573   // instructions with the block that falls through into the common successor.
574   // This is true only for a single successor. For multiple successors, we are
575   // trading a conditional branch for an unconditional one.
576   // TODO: Re-visit successor size for non-layout tail merging.
577   if ((MBB1 == PredBB || MBB2 == PredBB) &&
578       (!AfterPlacement || MBB1->succ_size() == 1)) {
579     MachineBasicBlock::iterator I;
580     unsigned NumTerms = CountTerminators(MBB1 == PredBB ? MBB2 : MBB1, I);
581     if (CommonTailLen > NumTerms)
582       return true;
583   }
584 
585   // If these are identical non-return blocks with no successors, merge them.
586   // Such blocks are typically cold calls to noreturn functions like abort, and
587   // are unlikely to become a fallthrough target after machine block placement.
588   // Tail merging these blocks is unlikely to create additional unconditional
589   // branches, and will reduce the size of this cold code.
590   if (I1 == MBB1->begin() && I2 == MBB2->begin() &&
591       blockEndsInUnreachable(MBB1) && blockEndsInUnreachable(MBB2))
592     return true;
593 
594   // If one of the blocks can be completely merged and happens to be in
595   // a position where the other could fall through into it, merge any number
596   // of instructions, because it can be done without a branch.
597   // TODO: If the blocks are not adjacent, move one of them so that they are?
598   if (MBB1->isLayoutSuccessor(MBB2) && I2 == MBB2->begin())
599     return true;
600   if (MBB2->isLayoutSuccessor(MBB1) && I1 == MBB1->begin())
601     return true;
602 
  // If both blocks have an unconditional branch temporarily stripped out,
  // count that as an additional common instruction for the following
  // heuristics. This heuristic is only accurate for single-successor blocks,
  // so we check for a single successor when merging during layout to keep
  // merging and duplicating from crashing.
608   unsigned EffectiveTailLen = CommonTailLen;
609   if (SuccBB && MBB1 != PredBB && MBB2 != PredBB &&
610       (MBB1->succ_size() == 1 || !AfterPlacement) &&
611       !MBB1->back().isBarrier() &&
612       !MBB2->back().isBarrier())
613     ++EffectiveTailLen;
614 
615   // Check if the common tail is long enough to be worthwhile.
616   if (EffectiveTailLen >= MinCommonTailLength)
617     return true;
618 
619   // If we are optimizing for code size, 2 instructions in common is enough if
620   // we don't have to split a block.  At worst we will be introducing 1 new
621   // branch instruction, which is likely to be smaller than the 2
622   // instructions that would be deleted in the merge.
623   MachineFunction *MF = MBB1->getParent();
624   return EffectiveTailLen >= 2 && MF->getFunction()->optForSize() &&
625          (I1 == MBB1->begin() || I2 == MBB2->begin());
626 }
627 
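// ComputeSameTails - Look at the MergePotentials entries that share the hash
// CurHash (they sort to the end of the vector) and fill SameTails with the
// blocks that attain the longest profitable common tail, returning its length.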
628 unsigned BranchFolder::ComputeSameTails(unsigned CurHash,
629                                         unsigned MinCommonTailLength,
630                                         MachineBasicBlock *SuccBB,
631                                         MachineBasicBlock *PredBB) {
632   unsigned maxCommonTailLength = 0U;
633   SameTails.clear();
634   MachineBasicBlock::iterator TrialBBI1, TrialBBI2;
635   MPIterator HighestMPIter = std::prev(MergePotentials.end());
636   for (MPIterator CurMPIter = std::prev(MergePotentials.end()),
637                   B = MergePotentials.begin();
638        CurMPIter != B && CurMPIter->getHash() == CurHash; --CurMPIter) {
639     for (MPIterator I = std::prev(CurMPIter); I->getHash() == CurHash; --I) {
640       unsigned CommonTailLen;
641       if (ProfitableToMerge(CurMPIter->getBlock(), I->getBlock(),
642                             MinCommonTailLength,
643                             CommonTailLen, TrialBBI1, TrialBBI2,
644                             SuccBB, PredBB,
645                             FuncletMembership,
646                             AfterBlockPlacement)) {
647         if (CommonTailLen > maxCommonTailLength) {
648           SameTails.clear();
649           maxCommonTailLength = CommonTailLen;
650           HighestMPIter = CurMPIter;
651           SameTails.push_back(SameTailElt(CurMPIter, TrialBBI1));
652         }
653         if (HighestMPIter == CurMPIter &&
654             CommonTailLen == maxCommonTailLength)
655           SameTails.push_back(SameTailElt(I, TrialBBI2));
656       }
657       if (I == B)
658         break;
659     }
660   }
661   return maxCommonTailLength;
662 }
663 
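// RemoveBlocksWithHash - Remove every entry with hash CurHash from
// MergePotentials, putting back the unconditional branches that were stripped
// while canonicalizing the blocks.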
664 void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
665                                         MachineBasicBlock *SuccBB,
666                                         MachineBasicBlock *PredBB) {
667   MPIterator CurMPIter, B;
668   for (CurMPIter = std::prev(MergePotentials.end()),
669       B = MergePotentials.begin();
670        CurMPIter->getHash() == CurHash; --CurMPIter) {
671     // Put the unconditional branch back, if we need one.
672     MachineBasicBlock *CurMBB = CurMPIter->getBlock();
673     if (SuccBB && CurMBB != PredBB)
674       FixTail(CurMBB, SuccBB, TII);
675     if (CurMPIter == B)
676       break;
677   }
678   if (CurMPIter->getHash() != CurHash)
679     CurMPIter++;
680   MergePotentials.erase(CurMPIter, MergePotentials.end());
681 }
682 
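// CreateCommonTailOnlyBlock - Make one of the SameTails entries a block that
// consists only of the common tail, splitting a block if necessary. PredBB is
// preferred since it needs no new branch; otherwise pick the block whose
// leading (non-tail) portion looks cheapest to execute.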
683 bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
684                                              MachineBasicBlock *SuccBB,
685                                              unsigned maxCommonTailLength,
686                                              unsigned &commonTailIndex) {
687   commonTailIndex = 0;
688   unsigned TimeEstimate = ~0U;
689   for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
690     // Use PredBB if possible; that doesn't require a new branch.
691     if (SameTails[i].getBlock() == PredBB) {
692       commonTailIndex = i;
693       break;
694     }
695     // Otherwise, make a (fairly bogus) choice based on estimate of
696     // how long it will take the various blocks to execute.
697     unsigned t = EstimateRuntime(SameTails[i].getBlock()->begin(),
698                                  SameTails[i].getTailStartPos());
699     if (t <= TimeEstimate) {
700       TimeEstimate = t;
701       commonTailIndex = i;
702     }
703   }
704 
705   MachineBasicBlock::iterator BBI =
706     SameTails[commonTailIndex].getTailStartPos();
707   MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
708 
709   DEBUG(dbgs() << "\nSplitting BB#" << MBB->getNumber() << ", size "
710                << maxCommonTailLength);
711 
712   // If the split block unconditionally falls-thru to SuccBB, it will be
713   // merged. In control flow terms it should then take SuccBB's name. e.g. If
714   // SuccBB is an inner loop, the common tail is still part of the inner loop.
715   const BasicBlock *BB = (SuccBB && MBB->succ_size() == 1) ?
716     SuccBB->getBasicBlock() : MBB->getBasicBlock();
717   MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI, BB);
718   if (!newMBB) {
719     DEBUG(dbgs() << "... failed!");
720     return false;
721   }
722 
723   SameTails[commonTailIndex].setBlock(newMBB);
724   SameTails[commonTailIndex].setTailStartPos(newMBB->begin());
725 
726   // If we split PredBB, newMBB is the new predecessor.
727   if (PredBB == MBB)
728     PredBB = newMBB;
729 
730   return true;
731 }
732 
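// MergeCommonTailDebugLocs - For every instruction in the common-tail block,
// merge in the debug locations of the corresponding identical instructions
// from the other tails that are about to be removed.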
733 void BranchFolder::MergeCommonTailDebugLocs(unsigned commonTailIndex) {
734   MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
735 
736   std::vector<MachineBasicBlock::iterator> NextCommonInsts(SameTails.size());
737   for (unsigned int i = 0 ; i != SameTails.size() ; ++i) {
738     if (i != commonTailIndex)
739       NextCommonInsts[i] = SameTails[i].getTailStartPos();
740     else {
741       assert(SameTails[i].getTailStartPos() == MBB->begin() &&
742           "MBB is not a common tail only block");
743     }
744   }
745 
746   for (auto &MI : *MBB) {
747     if (MI.isDebugValue())
748       continue;
749     DebugLoc DL = MI.getDebugLoc();
750     for (unsigned int i = 0 ; i < NextCommonInsts.size() ; i++) {
751       if (i == commonTailIndex)
752         continue;
753 
754       auto &Pos = NextCommonInsts[i];
755       assert(Pos != SameTails[i].getBlock()->end() &&
756           "Reached BB end within common tail");
757       while (Pos->isDebugValue()) {
758         ++Pos;
759         assert(Pos != SameTails[i].getBlock()->end() &&
760             "Reached BB end within common tail");
761       }
762       assert(MI.isIdenticalTo(*Pos) && "Expected matching MIIs!");
763       DL = DILocation::getMergedLocation(DL, Pos->getDebugLoc());
764       NextCommonInsts[i] = ++Pos;
765     }
766     MI.setDebugLoc(DL);
767   }
768 }
769 
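// Walk the common tail of MBBIStartPos's block and the matching instructions
// of MBBCommon in lockstep, merging memory operands and dropping undef flags
// that do not hold in both copies.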
770 static void
771 mergeOperations(MachineBasicBlock::iterator MBBIStartPos,
772                 MachineBasicBlock &MBBCommon) {
773   MachineBasicBlock *MBB = MBBIStartPos->getParent();
  // Note that CommonTailLen does not necessarily match the size of the common
  // BB, nor line up with all of its instructions, because the blocks may
  // differ in their debug instructions.
777   unsigned CommonTailLen = 0;
778   for (auto E = MBB->end(); MBBIStartPos != E; ++MBBIStartPos)
779     ++CommonTailLen;
780 
781   MachineBasicBlock::reverse_iterator MBBI = MBB->rbegin();
782   MachineBasicBlock::reverse_iterator MBBIE = MBB->rend();
783   MachineBasicBlock::reverse_iterator MBBICommon = MBBCommon.rbegin();
784   MachineBasicBlock::reverse_iterator MBBIECommon = MBBCommon.rend();
785 
786   while (CommonTailLen--) {
787     assert(MBBI != MBBIE && "Reached BB end within common tail length!");
788     (void)MBBIE;
789 
790     if (MBBI->isDebugValue()) {
791       ++MBBI;
792       continue;
793     }
794 
795     while ((MBBICommon != MBBIECommon) && MBBICommon->isDebugValue())
796       ++MBBICommon;
797 
798     assert(MBBICommon != MBBIECommon &&
799            "Reached BB end within common tail length!");
800     assert(MBBICommon->isIdenticalTo(*MBBI) && "Expected matching MIIs!");
801 
802     // Merge MMOs from memory operations in the common block.
803     if (MBBICommon->mayLoad() || MBBICommon->mayStore())
804       MBBICommon->setMemRefs(MBBICommon->mergeMemRefsWith(*MBBI));
805     // Drop undef flags if they aren't present in all merged instructions.
806     for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) {
807       MachineOperand &MO = MBBICommon->getOperand(I);
808       if (MO.isReg() && MO.isUndef()) {
809         const MachineOperand &OtherMO = MBBI->getOperand(I);
810         if (!OtherMO.isUndef())
811           MO.setIsUndef(false);
812       }
813     }
814 
815     ++MBBI;
816     ++MBBICommon;
817   }
818 }
819 
820 // See if any of the blocks in MergePotentials (which all have SuccBB as a
821 // successor, or all have no successor if it is null) can be tail-merged.
822 // If there is a successor, any blocks in MergePotentials that are not
823 // tail-merged and are not immediately before Succ must have an unconditional
824 // branch to Succ added (but the predecessor/successor lists need no
825 // adjustment). The lone predecessor of Succ that falls through into Succ,
826 // if any, is given in PredBB.
827 // MinCommonTailLength - Except for the special cases below, tail-merge if
828 // there are at least this many instructions in common.
829 bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
830                                       MachineBasicBlock *PredBB,
831                                       unsigned MinCommonTailLength) {
832   bool MadeChange = false;
833 
834   DEBUG(dbgs() << "\nTryTailMergeBlocks: ";
835         for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
836           dbgs() << "BB#" << MergePotentials[i].getBlock()->getNumber()
837                  << (i == e-1 ? "" : ", ");
838         dbgs() << "\n";
839         if (SuccBB) {
840           dbgs() << "  with successor BB#" << SuccBB->getNumber() << '\n';
841           if (PredBB)
842             dbgs() << "  which has fall-through from BB#"
843                    << PredBB->getNumber() << "\n";
844         }
845         dbgs() << "Looking for common tails of at least "
846                << MinCommonTailLength << " instruction"
847                << (MinCommonTailLength == 1 ? "" : "s") << '\n';
848        );
849 
850   // Sort by hash value so that blocks with identical end sequences sort
851   // together.
852   array_pod_sort(MergePotentials.begin(), MergePotentials.end());
853 
854   // Walk through equivalence sets looking for actual exact matches.
855   while (MergePotentials.size() > 1) {
856     unsigned CurHash = MergePotentials.back().getHash();
857 
858     // Build SameTails, identifying the set of blocks with this hash code
859     // and with the maximum number of instructions in common.
860     unsigned maxCommonTailLength = ComputeSameTails(CurHash,
861                                                     MinCommonTailLength,
862                                                     SuccBB, PredBB);
863 
864     // If we didn't find any pair that has at least MinCommonTailLength
865     // instructions in common, remove all blocks with this hash code and retry.
866     if (SameTails.empty()) {
867       RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
868       continue;
869     }
870 
871     // If one of the blocks is the entire common tail (and not the entry
872     // block, which we can't jump to), we can treat all blocks with this same
873     // tail at once.  Use PredBB if that is one of the possibilities, as that
874     // will not introduce any extra branches.
875     MachineBasicBlock *EntryBB =
876         &MergePotentials.front().getBlock()->getParent()->front();
877     unsigned commonTailIndex = SameTails.size();
878     // If there are two blocks, check to see if one can be made to fall through
879     // into the other.
880     if (SameTails.size() == 2 &&
881         SameTails[0].getBlock()->isLayoutSuccessor(SameTails[1].getBlock()) &&
882         SameTails[1].tailIsWholeBlock())
883       commonTailIndex = 1;
884     else if (SameTails.size() == 2 &&
885              SameTails[1].getBlock()->isLayoutSuccessor(
886                                                      SameTails[0].getBlock()) &&
887              SameTails[0].tailIsWholeBlock())
888       commonTailIndex = 0;
889     else {
890       // Otherwise just pick one, favoring the fall-through predecessor if
891       // there is one.
892       for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
893         MachineBasicBlock *MBB = SameTails[i].getBlock();
894         if (MBB == EntryBB && SameTails[i].tailIsWholeBlock())
895           continue;
896         if (MBB == PredBB) {
897           commonTailIndex = i;
898           break;
899         }
900         if (SameTails[i].tailIsWholeBlock())
901           commonTailIndex = i;
902       }
903     }
904 
905     if (commonTailIndex == SameTails.size() ||
906         (SameTails[commonTailIndex].getBlock() == PredBB &&
907          !SameTails[commonTailIndex].tailIsWholeBlock())) {
908       // None of the blocks consist entirely of the common tail.
909       // Split a block so that one does.
910       if (!CreateCommonTailOnlyBlock(PredBB, SuccBB,
911                                      maxCommonTailLength, commonTailIndex)) {
912         RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
913         continue;
914       }
915     }
916 
917     MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
918 
919     // Recompute common tail MBB's edge weights and block frequency.
920     setCommonTailEdgeWeights(*MBB);
921 
922     // Merge debug locations across identical instructions for common tail.
923     MergeCommonTailDebugLocs(commonTailIndex);
924 
925     // MBB is common tail.  Adjust all other BB's to jump to this one.
926     // Traversal must be forwards so erases work.
927     DEBUG(dbgs() << "\nUsing common tail in BB#" << MBB->getNumber()
928                  << " for ");
929     for (unsigned int i=0, e = SameTails.size(); i != e; ++i) {
930       if (commonTailIndex == i)
931         continue;
932       DEBUG(dbgs() << "BB#" << SameTails[i].getBlock()->getNumber()
933                    << (i == e-1 ? "" : ", "));
934       // Merge operations (MMOs, undef flags)
935       mergeOperations(SameTails[i].getTailStartPos(), *MBB);
936       // Hack the end off BB i, making it jump to BB commonTailIndex instead.
937       ReplaceTailWithBranchTo(SameTails[i].getTailStartPos(), MBB);
938       // BB i is no longer a predecessor of SuccBB; remove it from the worklist.
939       MergePotentials.erase(SameTails[i].getMPIter());
940     }
941     DEBUG(dbgs() << "\n");
942     // We leave commonTailIndex in the worklist in case there are other blocks
943     // that match it with a smaller number of instructions.
944     MadeChange = true;
945   }
946   return MadeChange;
947 }
948 
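// TailMergeBlocks - Tail-merge across the function: first blocks with no
// successors, then the predecessors of each block with multiple predecessors,
// after putting those predecessors into the canonical form described in the
// comment below.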
949 bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
950   bool MadeChange = false;
951   if (!EnableTailMerge) return MadeChange;
952 
953   // First find blocks with no successors.
954   // Block placement does not create new tail merging opportunities for these
955   // blocks.
956   if (!AfterBlockPlacement) {
957     MergePotentials.clear();
958     for (MachineBasicBlock &MBB : MF) {
959       if (MergePotentials.size() == TailMergeThreshold)
960         break;
961       if (!TriedMerging.count(&MBB) && MBB.succ_empty())
962         MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(MBB), &MBB));
963     }
964 
965     // If this is a large problem, avoid visiting the same basic blocks
966     // multiple times.
967     if (MergePotentials.size() == TailMergeThreshold)
968       for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
969         TriedMerging.insert(MergePotentials[i].getBlock());
970 
971     // See if we can do any tail merging on those.
972     if (MergePotentials.size() >= 2)
973       MadeChange |= TryTailMergeBlocks(nullptr, nullptr, MinCommonTailLength);
974   }
975 
976   // Look at blocks (IBB) with multiple predecessors (PBB).
977   // We change each predecessor to a canonical form, by
978   // (1) temporarily removing any unconditional branch from the predecessor
979   // to IBB, and
  // (2) alter conditional branches so they branch to the other block,
  // not IBB; this may require adding back an unconditional branch to IBB
982   // later, where there wasn't one coming in.  E.g.
983   //   Bcc IBB
984   //   fallthrough to QBB
985   // here becomes
986   //   Bncc QBB
987   // with a conceptual B to IBB after that, which never actually exists.
988   // With those changes, we see whether the predecessors' tails match,
989   // and merge them if so.  We change things out of canonical form and
990   // back to the way they were later in the process.  (OptimizeBranches
991   // would undo some of this, but we can't use it, because we'd get into
992   // a compile-time infinite loop repeatedly doing and undoing the same
993   // transformations.)
994 
995   for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
996        I != E; ++I) {
997     if (I->pred_size() < 2) continue;
998     SmallPtrSet<MachineBasicBlock *, 8> UniquePreds;
999     MachineBasicBlock *IBB = &*I;
1000     MachineBasicBlock *PredBB = &*std::prev(I);
1001     MergePotentials.clear();
1002     MachineLoop *ML;
1003 
1004     // Bail if merging after placement and IBB is the loop header because
1005     // -- If merging predecessors that belong to the same loop as IBB, the
1006     // common tail of merged predecessors may become the loop top if block
1007     // placement is called again and the predecessors may branch to this common
1008     // tail and require more branches. This can be relaxed if
1009     // MachineBlockPlacement::findBestLoopTop is more flexible.
    // -- If merging predecessors that do not belong to the same loop as IBB,
    // the loop info of IBB's loop and the other loops may be affected. Calling
    // block placement again may change the layout substantially and eliminate
    // the reason to do tail merging here.
1014     if (AfterBlockPlacement && MLI) {
1015       ML = MLI->getLoopFor(IBB);
1016       if (ML && IBB == ML->getHeader())
1017         continue;
1018     }
1019 
1020     for (MachineBasicBlock *PBB : I->predecessors()) {
1021       if (MergePotentials.size() == TailMergeThreshold)
1022         break;
1023 
1024       if (TriedMerging.count(PBB))
1025         continue;
1026 
      // Skip blocks that loop to themselves; we can't tail merge these.
1028       if (PBB == IBB)
1029         continue;
1030 
1031       // Visit each predecessor only once.
1032       if (!UniquePreds.insert(PBB).second)
1033         continue;
1034 
1035       // Skip blocks which may jump to a landing pad. Can't tail merge these.
1036       if (PBB->hasEHPadSuccessor())
1037         continue;
1038 
1039       // After block placement, only consider predecessors that belong to the
1040       // same loop as IBB.  The reason is the same as above when skipping loop
1041       // header.
1042       if (AfterBlockPlacement && MLI)
1043         if (ML != MLI->getLoopFor(PBB))
1044           continue;
1045 
1046       MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
1047       SmallVector<MachineOperand, 4> Cond;
1048       if (!TII->analyzeBranch(*PBB, TBB, FBB, Cond, true)) {
1049         // Failing case: IBB is the target of a cbr, and we cannot reverse the
1050         // branch.
1051         SmallVector<MachineOperand, 4> NewCond(Cond);
1052         if (!Cond.empty() && TBB == IBB) {
1053           if (TII->reverseBranchCondition(NewCond))
1054             continue;
1055           // This is the QBB case described above
1056           if (!FBB) {
1057             auto Next = ++PBB->getIterator();
1058             if (Next != MF.end())
1059               FBB = &*Next;
1060           }
1061         }
1062 
1063         // Failing case: the only way IBB can be reached from PBB is via
1064         // exception handling.  Happens for landing pads.  Would be nice to have
1065         // a bit in the edge so we didn't have to do all this.
1066         if (IBB->isEHPad()) {
1067           MachineFunction::iterator IP = ++PBB->getIterator();
1068           MachineBasicBlock *PredNextBB = nullptr;
1069           if (IP != MF.end())
1070             PredNextBB = &*IP;
1071           if (!TBB) {
1072             if (IBB != PredNextBB)      // fallthrough
1073               continue;
1074           } else if (FBB) {
1075             if (TBB != IBB && FBB != IBB)   // cbr then ubr
1076               continue;
1077           } else if (Cond.empty()) {
1078             if (TBB != IBB)               // ubr
1079               continue;
1080           } else {
1081             if (TBB != IBB && IBB != PredNextBB)  // cbr
1082               continue;
1083           }
1084         }
1085 
1086         // Remove the unconditional branch at the end, if any.
1087         if (TBB && (Cond.empty() || FBB)) {
1088           DebugLoc dl = PBB->findBranchDebugLoc();
1089           TII->removeBranch(*PBB);
1090           if (!Cond.empty())
1091             // reinsert conditional branch only, for now
1092             TII->insertBranch(*PBB, (TBB == IBB) ? FBB : TBB, nullptr,
1093                               NewCond, dl);
1094         }
1095 
1096         MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(*PBB), PBB));
1097       }
1098     }
1099 
1100     // If this is a large problem, avoid visiting the same basic blocks multiple
1101     // times.
1102     if (MergePotentials.size() == TailMergeThreshold)
1103       for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
1104         TriedMerging.insert(MergePotentials[i].getBlock());
1105 
1106     if (MergePotentials.size() >= 2)
1107       MadeChange |= TryTailMergeBlocks(IBB, PredBB, MinCommonTailLength);
1108 
1109     // Reinsert an unconditional branch if needed. The 1 below can occur as a
1110     // result of removing blocks in TryTailMergeBlocks.
1111     PredBB = &*std::prev(I); // this may have been changed in TryTailMergeBlocks
1112     if (MergePotentials.size() == 1 &&
1113         MergePotentials.begin()->getBlock() != PredBB)
1114       FixTail(MergePotentials.begin()->getBlock(), IBB, TII);
1115   }
1116 
1117   return MadeChange;
1118 }
1119 
1120 void BranchFolder::setCommonTailEdgeWeights(MachineBasicBlock &TailMBB) {
1121   SmallVector<BlockFrequency, 2> EdgeFreqLs(TailMBB.succ_size());
1122   BlockFrequency AccumulatedMBBFreq;
1123 
1124   // Aggregate edge frequency of successor edge j:
1125   //  edgeFreq(j) = sum (freq(bb) * edgeProb(bb, j)),
1126   //  where bb is a basic block that is in SameTails.
1127   for (const auto &Src : SameTails) {
1128     const MachineBasicBlock *SrcMBB = Src.getBlock();
1129     BlockFrequency BlockFreq = MBBFreqInfo.getBlockFreq(SrcMBB);
1130     AccumulatedMBBFreq += BlockFreq;
1131 
    // It is not necessary to recompute edge weights if TailMBB has fewer than
    // two successors.
1134     if (TailMBB.succ_size() <= 1)
1135       continue;
1136 
1137     auto EdgeFreq = EdgeFreqLs.begin();
1138 
1139     for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end();
1140          SuccI != SuccE; ++SuccI, ++EdgeFreq)
1141       *EdgeFreq += BlockFreq * MBPI.getEdgeProbability(SrcMBB, *SuccI);
1142   }
1143 
1144   MBBFreqInfo.setBlockFreq(&TailMBB, AccumulatedMBBFreq);
1145 
1146   if (TailMBB.succ_size() <= 1)
1147     return;
1148 
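  // Normalize the accumulated edge frequencies into branch probabilities on
  // TailMBB's successor edges.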
1149   auto SumEdgeFreq =
1150       std::accumulate(EdgeFreqLs.begin(), EdgeFreqLs.end(), BlockFrequency(0))
1151           .getFrequency();
1152   auto EdgeFreq = EdgeFreqLs.begin();
1153 
1154   if (SumEdgeFreq > 0) {
1155     for (auto SuccI = TailMBB.succ_begin(), SuccE = TailMBB.succ_end();
1156          SuccI != SuccE; ++SuccI, ++EdgeFreq) {
1157       auto Prob = BranchProbability::getBranchProbability(
1158           EdgeFreq->getFrequency(), SumEdgeFreq);
1159       TailMBB.setSuccProbability(SuccI, Prob);
1160     }
1161   }
1162 }
1163 
1164 //===----------------------------------------------------------------------===//
1165 //  Branch Optimization
1166 //===----------------------------------------------------------------------===//
1167 
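// OptimizeBranches - Run OptimizeBlock over every block except the entry block
// and delete any block that ends up with no predecessors.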
1168 bool BranchFolder::OptimizeBranches(MachineFunction &MF) {
1169   bool MadeChange = false;
1170 
1171   // Make sure blocks are numbered in order
1172   MF.RenumberBlocks();
1173   // Renumbering blocks alters funclet membership, recalculate it.
1174   FuncletMembership = getFuncletMembership(MF);
1175 
1176   for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
1177        I != E; ) {
1178     MachineBasicBlock *MBB = &*I++;
1179     MadeChange |= OptimizeBlock(MBB);
1180 
1181     // If it is dead, remove it.
1182     if (MBB->pred_empty()) {
1183       RemoveDeadBlock(MBB);
1184       MadeChange = true;
1185       ++NumDeadBlocks;
1186     }
1187   }
1188 
1189   return MadeChange;
1190 }
1191 
1192 // Blocks should be considered empty if they contain only debug info;
1193 // else the debug info would affect codegen.
1194 static bool IsEmptyBlock(MachineBasicBlock *MBB) {
1195   return MBB->getFirstNonDebugInstr() == MBB->end();
1196 }
1197 
1198 // Blocks with only debug info and branches should be considered the same
1199 // as blocks with only branches.
1200 static bool IsBranchOnlyBlock(MachineBasicBlock *MBB) {
1201   MachineBasicBlock::iterator I = MBB->getFirstNonDebugInstr();
1202   assert(I != MBB->end() && "empty block!");
1203   return I->isBranch();
1204 }
1205 
1206 /// IsBetterFallthrough - Return true if it would be clearly better to
1207 /// fall-through to MBB1 than to fall through into MBB2.  This has to return
1208 /// a strict ordering, returning true for both (MBB1,MBB2) and (MBB2,MBB1) will
1209 /// result in infinite loops.
1210 static bool IsBetterFallthrough(MachineBasicBlock *MBB1,
1211                                 MachineBasicBlock *MBB2) {
1212   // Right now, we use a simple heuristic.  If MBB2 ends with a call, and
1213   // MBB1 doesn't, we prefer to fall through into MBB1.  This allows us to
1214   // optimize branches that branch to either a return block or an assert block
1215   // into a fallthrough to the return.
1216   MachineBasicBlock::iterator MBB1I = MBB1->getLastNonDebugInstr();
1217   MachineBasicBlock::iterator MBB2I = MBB2->getLastNonDebugInstr();
1218   if (MBB1I == MBB1->end() || MBB2I == MBB2->end())
1219     return false;
1220 
1221   // If there is a clear successor ordering we make sure that one block
1222   // will fall through to the next
1223   if (MBB1->isSuccessor(MBB2)) return true;
1224   if (MBB2->isSuccessor(MBB1)) return false;
1225 
1226   return MBB2I->isCall() && !MBB1I->isCall();
1227 }
1228 
/// getBranchDebugLoc - Find and return, if any, the DebugLoc of the branch
/// instructions in the block.
1231 static DebugLoc getBranchDebugLoc(MachineBasicBlock &MBB) {
1232   MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
1233   if (I != MBB.end() && I->isBranch())
1234     return I->getDebugLoc();
1235   return DebugLoc();
1236 }
1237 
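// OptimizeBlock - Analyze and optimize control flow related to the specified
// block. This is never called on the entry block (OptimizeBranches starts at
// the second block).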
1238 bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) {
1239   bool MadeChange = false;
1240   MachineFunction &MF = *MBB->getParent();
1241 ReoptimizeBlock:
1242 
1243   MachineFunction::iterator FallThrough = MBB->getIterator();
1244   ++FallThrough;
1245 
1246   // Make sure MBB and FallThrough belong to the same funclet.
1247   bool SameFunclet = true;
1248   if (!FuncletMembership.empty() && FallThrough != MF.end()) {
1249     auto MBBFunclet = FuncletMembership.find(MBB);
1250     assert(MBBFunclet != FuncletMembership.end());
1251     auto FallThroughFunclet = FuncletMembership.find(&*FallThrough);
1252     assert(FallThroughFunclet != FuncletMembership.end());
1253     SameFunclet = MBBFunclet->second == FallThroughFunclet->second;
1254   }
1255 
1256   // If this block is empty, make everyone use its fall-through, not the block
1257   // explicitly.  Landing pads should not do this since the landing-pad table
1258   // points to this block.  Blocks with their addresses taken shouldn't be
1259   // optimized away.
1260   if (IsEmptyBlock(MBB) && !MBB->isEHPad() && !MBB->hasAddressTaken() &&
1261       SameFunclet) {
1262     // Dead block?  Leave for cleanup later.
1263     if (MBB->pred_empty()) return MadeChange;
1264 
1265     if (FallThrough == MF.end()) {
1266       // TODO: Simplify preds to not branch here if possible!
1267     } else if (FallThrough->isEHPad()) {
      // Don't rewrite to a landing pad fallthrough.  That could lead to the case
1269       // where a BB jumps to more than one landing pad.
1270       // TODO: Is it ever worth rewriting predecessors which don't already
1271       // jump to a landing pad, and so can safely jump to the fallthrough?
1272     } else if (MBB->isSuccessor(&*FallThrough)) {
1273       // Rewrite all predecessors of the old block to go to the fallthrough
1274       // instead.
1275       while (!MBB->pred_empty()) {
1276         MachineBasicBlock *Pred = *(MBB->pred_end()-1);
1277         Pred->ReplaceUsesOfBlockWith(MBB, &*FallThrough);
1278       }
1279       // If MBB was the target of a jump table, update jump tables to go to the
1280       // fallthrough instead.
1281       if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo())
1282         MJTI->ReplaceMBBInJumpTables(MBB, &*FallThrough);
1283       MadeChange = true;
1284     }
1285     return MadeChange;
1286   }
1287 
1288   // Check to see if we can simplify the terminator of the block before this
1289   // one.
1290   MachineBasicBlock &PrevBB = *std::prev(MachineFunction::iterator(MBB));
1291 
1292   MachineBasicBlock *PriorTBB = nullptr, *PriorFBB = nullptr;
1293   SmallVector<MachineOperand, 4> PriorCond;
1294   bool PriorUnAnalyzable =
1295       TII->analyzeBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, true);
1296   if (!PriorUnAnalyzable) {
1297     // If the CFG for the prior block has extra edges, remove them.
1298     MadeChange |= PrevBB.CorrectExtraCFGEdges(PriorTBB, PriorFBB,
1299                                               !PriorCond.empty());
1300 
1301     // If the previous branch is conditional and both conditions go to the same
1302     // destination, remove the branch, replacing it with an unconditional one or
1303     // a fall-through.
1304     if (PriorTBB && PriorTBB == PriorFBB) {
1305       DebugLoc dl = getBranchDebugLoc(PrevBB);
1306       TII->removeBranch(PrevBB);
1307       PriorCond.clear();
1308       if (PriorTBB != MBB)
1309         TII->insertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
1310       MadeChange = true;
1311       ++NumBranchOpts;
1312       goto ReoptimizeBlock;
1313     }
1314 
1315     // If the previous block unconditionally falls through to this block and
1316     // this block has no other predecessors, move the contents of this block
1317     // into the prior block. This doesn't usually happen when SimplifyCFG
1318     // has been used, but it can happen if tail merging splits a fall-through
1319     // predecessor of a block.
    // This has to check PrevBB.succ_size() because EH edges are ignored by
    // analyzeBranch.
1322     if (PriorCond.empty() && !PriorTBB && MBB->pred_size() == 1 &&
1323         PrevBB.succ_size() == 1 &&
1324         !MBB->hasAddressTaken() && !MBB->isEHPad()) {
1325       DEBUG(dbgs() << "\nMerging into block: " << PrevBB
1326                    << "From MBB: " << *MBB);
1327       // Remove redundant DBG_VALUEs first.
1328       if (PrevBB.begin() != PrevBB.end()) {
1329         MachineBasicBlock::iterator PrevBBIter = PrevBB.end();
1330         --PrevBBIter;
1331         MachineBasicBlock::iterator MBBIter = MBB->begin();
1332         // Check if DBG_VALUE at the end of PrevBB is identical to the
1333         // DBG_VALUE at the beginning of MBB.
1334         while (PrevBBIter != PrevBB.begin() && MBBIter != MBB->end()
1335                && PrevBBIter->isDebugValue() && MBBIter->isDebugValue()) {
1336           if (!MBBIter->isIdenticalTo(*PrevBBIter))
1337             break;
1338           MachineInstr &DuplicateDbg = *MBBIter;
          ++MBBIter;
          --PrevBBIter;
1340           DuplicateDbg.eraseFromParent();
1341         }
1342       }
1343       PrevBB.splice(PrevBB.end(), MBB, MBB->begin(), MBB->end());
1344       PrevBB.removeSuccessor(PrevBB.succ_begin());
1345       assert(PrevBB.succ_empty());
1346       PrevBB.transferSuccessors(MBB);
1347       MadeChange = true;
1348       return MadeChange;
1349     }
1350 
1351     // If the previous branch *only* branches to *this* block (conditional or
1352     // not) remove the branch.
1353     if (PriorTBB == MBB && !PriorFBB) {
1354       TII->removeBranch(PrevBB);
1355       MadeChange = true;
1356       ++NumBranchOpts;
1357       goto ReoptimizeBlock;
1358     }
1359 
    // If the prior block branches somewhere else when its condition is true
    // and branches here when it is false, the unconditional second branch to
    // this block is redundant; remove it and keep only the conditional branch.
1362     if (PriorFBB == MBB) {
1363       DebugLoc dl = getBranchDebugLoc(PrevBB);
1364       TII->removeBranch(PrevBB);
1365       TII->insertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
1366       MadeChange = true;
1367       ++NumBranchOpts;
1368       goto ReoptimizeBlock;
1369     }
1370 
1371     // If the prior block branches here on true and somewhere else on false, and
1372     // if the branch condition is reversible, reverse the branch to create a
1373     // fall-through.
1374     if (PriorTBB == MBB) {
1375       SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
1376       if (!TII->reverseBranchCondition(NewPriorCond)) {
1377         DebugLoc dl = getBranchDebugLoc(PrevBB);
1378         TII->removeBranch(PrevBB);
1379         TII->insertBranch(PrevBB, PriorFBB, nullptr, NewPriorCond, dl);
1380         MadeChange = true;
1381         ++NumBranchOpts;
1382         goto ReoptimizeBlock;
1383       }
1384     }
1385 
1386     // If this block has no successors (e.g. it is a return block or ends with
1387     // a call to a no-return function like abort or __cxa_throw) and if the pred
1388     // falls through into this block, and if it would otherwise fall through
1389     // into the block after this, move this block to the end of the function.
1390     //
    // We consider it more likely that execution will stay in the function
    // (e.g. due to loops) than exit it.  This helps with asserts in loops and
    // the like, moving the no-return (assert-failure) block out of the loop
    // body.
1394     if (MBB->succ_empty() && !PriorCond.empty() && !PriorFBB &&
1395         MachineFunction::iterator(PriorTBB) == FallThrough &&
1396         !MBB->canFallThrough()) {
1397       bool DoTransform = true;
1398 
      // We have to be careful that the successors of PrevBB aren't both
      // no-successor blocks.  If neither has successors and PrevBB is the
      // second-to-last block in the function, we'd just keep swapping the two
      // blocks into the last position.  Only do the swap if one is clearly a
      // better block to fall through to than the other.
1404       if (FallThrough == --MF.end() &&
1405           !IsBetterFallthrough(PriorTBB, MBB))
1406         DoTransform = false;
1407 
1408       if (DoTransform) {
1409         // Reverse the branch so we will fall through on the previous true cond.
1410         SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
1411         if (!TII->reverseBranchCondition(NewPriorCond)) {
1412           DEBUG(dbgs() << "\nMoving MBB: " << *MBB
1413                        << "To make fallthrough to: " << *PriorTBB << "\n");
1414 
1415           DebugLoc dl = getBranchDebugLoc(PrevBB);
1416           TII->removeBranch(PrevBB);
1417           TII->insertBranch(PrevBB, MBB, nullptr, NewPriorCond, dl);
1418 
1419           // Move this block to the end of the function.
1420           MBB->moveAfter(&MF.back());
1421           MadeChange = true;
1422           ++NumBranchOpts;
1423           return MadeChange;
1424         }
1425       }
1426     }
1427   }
1428 
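  // If this block consists of nothing but an unconditional tail call and has a
  // single predecessor that ends in a conditional branch to it, try to fold
  // the tail call into that conditional branch (only when optimizing for size;
  // see below).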
1429   if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 &&
1430       MF.getFunction()->optForSize()) {
1431     // Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch
1432     // direction, thereby defeating careful block placement and regressing
1433     // performance. Therefore, only consider this for optsize functions.
1434     MachineInstr &TailCall = *MBB->getFirstNonDebugInstr();
1435     if (TII->isUnconditionalTailCall(TailCall)) {
1436       MachineBasicBlock *Pred = *MBB->pred_begin();
1437       MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
1438       SmallVector<MachineOperand, 4> PredCond;
1439       bool PredAnalyzable =
1440           !TII->analyzeBranch(*Pred, PredTBB, PredFBB, PredCond, true);
1441 
1442       if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB) {
1443         // The predecessor has a conditional branch to this block which consists
1444         // of only a tail call. Try to fold the tail call into the conditional
1445         // branch.
1446         if (TII->canMakeTailCallConditional(PredCond, TailCall)) {
1447           // TODO: It would be nice if analyzeBranch() could provide a pointer
          // to the branch instruction so replaceBranchWithTailCall() doesn't
1449           // have to search for it.
1450           TII->replaceBranchWithTailCall(*Pred, PredCond, TailCall);
1451           ++NumTailCalls;
1452           Pred->removeSuccessor(MBB);
1453           MadeChange = true;
1454           return MadeChange;
1455         }
1456       }
1457       // If the predecessor is falling through to this block, we could reverse
1458       // the branch condition and fold the tail call into that. However, after
1459       // that we might have to re-arrange the CFG to fall through to the other
1460       // block and there is a high risk of regressing code size rather than
1461       // improving it.
1462     }
1463   }
1464 
1465   // Analyze the branch in the current block.
1466   MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr;
1467   SmallVector<MachineOperand, 4> CurCond;
1468   bool CurUnAnalyzable =
1469       TII->analyzeBranch(*MBB, CurTBB, CurFBB, CurCond, true);
1470   if (!CurUnAnalyzable) {
1471     // If the CFG for the prior block has extra edges, remove them.
1472     MadeChange |= MBB->CorrectExtraCFGEdges(CurTBB, CurFBB, !CurCond.empty());
1473 
1474     // If this is a two-way branch, and the FBB branches to this block, reverse
1475     // the condition so the single-basic-block loop is faster.  Instead of:
1476     //    Loop: xxx; jcc Out; jmp Loop
1477     // we want:
1478     //    Loop: xxx; jncc Loop; jmp Out
1479     if (CurTBB && CurFBB && CurFBB == MBB && CurTBB != MBB) {
1480       SmallVector<MachineOperand, 4> NewCond(CurCond);
1481       if (!TII->reverseBranchCondition(NewCond)) {
1482         DebugLoc dl = getBranchDebugLoc(*MBB);
1483         TII->removeBranch(*MBB);
1484         TII->insertBranch(*MBB, CurFBB, CurTBB, NewCond, dl);
1485         MadeChange = true;
1486         ++NumBranchOpts;
1487         goto ReoptimizeBlock;
1488       }
1489     }
1490 
1491     // If this branch is the only thing in its block, see if we can forward
1492     // other blocks across it.
1493     if (CurTBB && CurCond.empty() && !CurFBB &&
1494         IsBranchOnlyBlock(MBB) && CurTBB != MBB &&
1495         !MBB->hasAddressTaken() && !MBB->isEHPad()) {
1496       DebugLoc dl = getBranchDebugLoc(*MBB);
1497       // This block may contain just an unconditional branch.  Because there can
1498       // be 'non-branch terminators' in the block, try removing the branch and
1499       // then seeing if the block is empty.
1500       TII->removeBranch(*MBB);
1501       // If the only things remaining in the block are debug info, remove these
1502       // as well, so this will behave the same as an empty block in non-debug
1503       // mode.
1504       if (IsEmptyBlock(MBB)) {
1505         // Make the block empty, losing the debug info (we could probably
1506         // improve this in some cases.)
1507         MBB->erase(MBB->begin(), MBB->end());
1508       }
1509       // If this block is just an unconditional branch to CurTBB, we can
1510       // usually completely eliminate the block.  The only case we cannot
1511       // completely eliminate the block is when the block before this one
1512       // falls through into MBB and we can't understand the prior block's branch
1513       // condition.
1514       if (MBB->empty()) {
1515         bool PredHasNoFallThrough = !PrevBB.canFallThrough();
1516         if (PredHasNoFallThrough || !PriorUnAnalyzable ||
1517             !PrevBB.isSuccessor(MBB)) {
1518           // If the prior block falls through into us, turn it into an
1519           // explicit branch to us to make updates simpler.
1520           if (!PredHasNoFallThrough && PrevBB.isSuccessor(MBB) &&
1521               PriorTBB != MBB && PriorFBB != MBB) {
1522             if (!PriorTBB) {
1523               assert(PriorCond.empty() && !PriorFBB &&
1524                      "Bad branch analysis");
1525               PriorTBB = MBB;
1526             } else {
1527               assert(!PriorFBB && "Machine CFG out of date!");
1528               PriorFBB = MBB;
1529             }
1530             DebugLoc pdl = getBranchDebugLoc(PrevBB);
1531             TII->removeBranch(PrevBB);
1532             TII->insertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, pdl);
1533           }
1534 
          // Iterate through all the predecessors, revectoring each in turn.
1536           size_t PI = 0;
1537           bool DidChange = false;
1538           bool HasBranchToSelf = false;
          while (PI != MBB->pred_size()) {
1540             MachineBasicBlock *PMBB = *(MBB->pred_begin() + PI);
1541             if (PMBB == MBB) {
1542               // If this block has an uncond branch to itself, leave it.
1543               ++PI;
1544               HasBranchToSelf = true;
1545             } else {
1546               DidChange = true;
1547               PMBB->ReplaceUsesOfBlockWith(MBB, CurTBB);
              // If this change resulted in PMBB ending in a conditional
              // branch where both destinations are the same block, change
              // this to an unconditional branch (and fix the CFG).
1551               MachineBasicBlock *NewCurTBB = nullptr, *NewCurFBB = nullptr;
1552               SmallVector<MachineOperand, 4> NewCurCond;
1553               bool NewCurUnAnalyzable = TII->analyzeBranch(
1554                   *PMBB, NewCurTBB, NewCurFBB, NewCurCond, true);
1555               if (!NewCurUnAnalyzable && NewCurTBB && NewCurTBB == NewCurFBB) {
1556                 DebugLoc pdl = getBranchDebugLoc(*PMBB);
1557                 TII->removeBranch(*PMBB);
1558                 NewCurCond.clear();
1559                 TII->insertBranch(*PMBB, NewCurTBB, nullptr, NewCurCond, pdl);
1560                 MadeChange = true;
1561                 ++NumBranchOpts;
1562                 PMBB->CorrectExtraCFGEdges(NewCurTBB, nullptr, false);
1563               }
1564             }
1565           }
1566 
1567           // Change any jumptables to go to the new MBB.
1568           if (MachineJumpTableInfo *MJTI = MF.getJumpTableInfo())
1569             MJTI->ReplaceMBBInJumpTables(MBB, CurTBB);
1570           if (DidChange) {
1571             ++NumBranchOpts;
1572             MadeChange = true;
1573             if (!HasBranchToSelf) return MadeChange;
1574           }
1575         }
1576       }
1577 
1578       // Add the branch back if the block is more than just an uncond branch.
1579       TII->insertBranch(*MBB, CurTBB, nullptr, CurCond, dl);
1580     }
1581   }
1582 
1583   // If the prior block doesn't fall through into this block, and if this
1584   // block doesn't fall through into some other block, see if we can find a
1585   // place to move this block where a fall-through will happen.
1586   if (!PrevBB.canFallThrough()) {
1587 
1588     // Now we know that there was no fall-through into this block, check to
1589     // see if it has a fall-through into its successor.
1590     bool CurFallsThru = MBB->canFallThrough();
1591 
1592     if (!MBB->isEHPad()) {
1593       // Check all the predecessors of this block.  If one of them has no fall
1594       // throughs, move this block right after it.
1595       for (MachineBasicBlock *PredBB : MBB->predecessors()) {
1596         // Analyze the branch at the end of the pred.
1597         MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
1598         SmallVector<MachineOperand, 4> PredCond;
1599         if (PredBB != MBB && !PredBB->canFallThrough() &&
1600             !TII->analyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true) &&
1601             (!CurFallsThru || !CurTBB || !CurFBB) &&
1602             (!CurFallsThru || MBB->getNumber() >= PredBB->getNumber())) {
1603           // If the current block doesn't fall through, just move it.
1604           // If the current block can fall through and does not end with a
1605           // conditional branch, we need to append an unconditional jump to
1606           // the (current) next block.  To avoid a possible compile-time
1607           // infinite loop, move blocks only backward in this case.
1608           // Also, if there are already 2 branches here, we cannot add a third;
1609           // this means we have the case
1610           // Bcc next
1611           // B elsewhere
1612           // next:
1613           if (CurFallsThru) {
1614             MachineBasicBlock *NextBB = &*std::next(MBB->getIterator());
1615             CurCond.clear();
1616             TII->insertBranch(*MBB, NextBB, nullptr, CurCond, DebugLoc());
1617           }
1618           MBB->moveAfter(PredBB);
1619           MadeChange = true;
1620           goto ReoptimizeBlock;
1621         }
1622       }
1623     }
1624 
1625     if (!CurFallsThru) {
1626       // Check all successors to see if we can move this block before it.
1627       for (MachineBasicBlock *SuccBB : MBB->successors()) {
1628         // Analyze the branch at the end of the block before the succ.
1629         MachineFunction::iterator SuccPrev = --SuccBB->getIterator();
1630 
1631         // If this block doesn't already fall-through to that successor, and if
1632         // the succ doesn't already have a block that can fall through into it,
1633         // and if the successor isn't an EH destination, we can arrange for the
1634         // fallthrough to happen.
1635         if (SuccBB != MBB && &*SuccPrev != MBB &&
1636             !SuccPrev->canFallThrough() && !CurUnAnalyzable &&
1637             !SuccBB->isEHPad()) {
1638           MBB->moveBefore(SuccBB);
1639           MadeChange = true;
1640           goto ReoptimizeBlock;
1641         }
1642       }
1643 
1644       // Okay, there is no really great place to put this block.  If, however,
1645       // the block before this one would be a fall-through if this block were
1646       // removed, move this block to the end of the function. There is no real
1647       // advantage in "falling through" to an EH block, so we don't want to
1648       // perform this transformation for that case.
1649       //
1650       // Also, Windows EH introduced the possibility of an arbitrary number of
1651       // successors to a given block.  The analyzeBranch call does not consider
1652       // exception handling and so we can get in a state where a block
1653       // containing a call is followed by multiple EH blocks that would be
1654       // rotated infinitely at the end of the function if the transformation
1655       // below were performed for EH "FallThrough" blocks.  Therefore, even if
1656       // that appears not to be happening anymore, we should assume that it is
      // possible and not remove the "!FallThrough->isEHPad()" condition below.
1658       MachineBasicBlock *PrevTBB = nullptr, *PrevFBB = nullptr;
1659       SmallVector<MachineOperand, 4> PrevCond;
1660       if (FallThrough != MF.end() &&
1661           !FallThrough->isEHPad() &&
1662           !TII->analyzeBranch(PrevBB, PrevTBB, PrevFBB, PrevCond, true) &&
1663           PrevBB.isSuccessor(&*FallThrough)) {
1664         MBB->moveAfter(&MF.back());
1665         MadeChange = true;
1666         return MadeChange;
1667       }
1668     }
1669   }
1670 
1671   return MadeChange;
1672 }
1673 
1674 //===----------------------------------------------------------------------===//
1675 //  Hoist Common Code
1676 //===----------------------------------------------------------------------===//
1677 
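/// HoistCommonCode - Walk every block in the function and try to hoist
/// instruction sequences that are common to the start of a block's successors
/// up into the block itself.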
1678 bool BranchFolder::HoistCommonCode(MachineFunction &MF) {
1679   bool MadeChange = false;
1680   for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ) {
1681     MachineBasicBlock *MBB = &*I++;
1682     MadeChange |= HoistCommonCodeInSuccs(MBB);
1683   }
1684 
1685   return MadeChange;
1686 }
1687 
1688 /// findFalseBlock - BB has a fallthrough. Find its 'false' successor given
1689 /// its 'true' successor.
1690 static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
1691                                          MachineBasicBlock *TrueBB) {
1692   for (MachineBasicBlock *SuccBB : BB->successors())
1693     if (SuccBB != TrueBB)
1694       return SuccBB;
1695   return nullptr;
1696 }
1697 
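/// addRegAndItsAliases - Add Reg to Set. For physical registers, also add all
/// registers that alias Reg so that later use/clobber checks stay conservative.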
1698 template <class Container>
1699 static void addRegAndItsAliases(unsigned Reg, const TargetRegisterInfo *TRI,
1700                                 Container &Set) {
1701   if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1702     for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
1703       Set.insert(*AI);
1704   } else {
1705     Set.insert(Reg);
1706   }
1707 }
1708 
1709 /// findHoistingInsertPosAndDeps - Find the location to move common instructions
/// in successors to. The location is usually just before the terminator;
/// however, if the terminator is a conditional branch and the instruction
/// immediately before it is the flag-setting instruction, that earlier
/// instruction is the preferred location. This function also gathers the uses
/// and defs of the instructions from the insertion point to the end of the
/// block. The data is
1715 /// used by HoistCommonCodeInSuccs to ensure safety.
1716 static
1717 MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
1718                                                   const TargetInstrInfo *TII,
1719                                                   const TargetRegisterInfo *TRI,
1720                                                   SmallSet<unsigned,4> &Uses,
1721                                                   SmallSet<unsigned,4> &Defs) {
1722   MachineBasicBlock::iterator Loc = MBB->getFirstTerminator();
1723   if (!TII->isUnpredicatedTerminator(*Loc))
1724     return MBB->end();
1725 
1726   for (const MachineOperand &MO : Loc->operands()) {
1727     if (!MO.isReg())
1728       continue;
1729     unsigned Reg = MO.getReg();
1730     if (!Reg)
1731       continue;
1732     if (MO.isUse()) {
1733       addRegAndItsAliases(Reg, TRI, Uses);
1734     } else {
1735       if (!MO.isDead())
1736         // Don't try to hoist code in the rare case the terminator defines a
1737         // register that is later used.
1738         return MBB->end();
1739 
1740       // If the terminator defines a register, make sure we don't hoist
1741       // the instruction whose def might be clobbered by the terminator.
1742       addRegAndItsAliases(Reg, TRI, Defs);
1743     }
1744   }
1745 
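  // If the terminator reads no registers, there is no condition-setting
  // instruction it needs to stay adjacent to, so hoisting directly before the
  // terminator is fine.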
1746   if (Uses.empty())
1747     return Loc;
1748   if (Loc == MBB->begin())
1749     return MBB->end();
1750 
  // The terminator is probably a conditional branch; try not to separate the
  // branch from the condition-setting instruction.
1753   MachineBasicBlock::iterator PI =
1754     skipDebugInstructionsBackward(std::prev(Loc), MBB->begin());
1755 
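  // Check whether PI defines any register that the terminator reads. If it
  // does, PI is most likely the instruction that computes the branch
  // condition.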
1756   bool IsDef = false;
1757   for (const MachineOperand &MO : PI->operands()) {
1758     // If PI has a regmask operand, it is probably a call. Separate away.
1759     if (MO.isRegMask())
1760       return Loc;
1761     if (!MO.isReg() || MO.isUse())
1762       continue;
1763     unsigned Reg = MO.getReg();
1764     if (!Reg)
1765       continue;
1766     if (Uses.count(Reg)) {
1767       IsDef = true;
1768       break;
1769     }
1770   }
1771   if (!IsDef)
1772     // The condition setting instruction is not just before the conditional
1773     // branch.
1774     return Loc;
1775 
  // Be conservative: don't insert an instruction above something that may have
  // side effects. And since it's potentially bad to separate the flag-setting
  // instruction from the conditional branch, just abort the optimization
  // completely.
  // Also avoid moving code above a predicated instruction, since it's hard to
  // reason about register liveness in the presence of predication.
1782   bool DontMoveAcrossStore = true;
1783   if (!PI->isSafeToMove(nullptr, DontMoveAcrossStore) || TII->isPredicated(*PI))
1784     return MBB->end();

  // Find out what registers are live. Note that this routine ignores other
  // live registers which are only used by instructions in successor blocks.
1789   for (const MachineOperand &MO : PI->operands()) {
1790     if (!MO.isReg())
1791       continue;
1792     unsigned Reg = MO.getReg();
1793     if (!Reg)
1794       continue;
1795     if (MO.isUse()) {
1796       addRegAndItsAliases(Reg, TRI, Uses);
1797     } else {
1798       if (Uses.erase(Reg)) {
1799         if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1800           for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
1801             Uses.erase(*SubRegs); // Use sub-registers to be conservative
1802         }
1803       }
1804       addRegAndItsAliases(Reg, TRI, Defs);
1805     }
1806   }
1807 
1808   return PI;
1809 }
1810 
1811 bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
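  // Only handle blocks that end in an analyzable conditional branch.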
1812   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
1813   SmallVector<MachineOperand, 4> Cond;
1814   if (TII->analyzeBranch(*MBB, TBB, FBB, Cond, true) || !TBB || Cond.empty())
1815     return false;
1816 
1817   if (!FBB) FBB = findFalseBlock(MBB, TBB);
1818   if (!FBB)
1819     // Malformed bcc? True and false blocks are the same?
1820     return false;
1821 
  // Restrict the optimization to cases where MBB is the only predecessor;
  // in that case it is an obvious win.
1824   if (TBB->pred_size() > 1 || FBB->pred_size() > 1)
1825     return false;
1826 
1827   // Find a suitable position to hoist the common instructions to. Also figure
1828   // out which registers are used or defined by instructions from the insertion
1829   // point to the end of the block.
1830   SmallSet<unsigned, 4> Uses, Defs;
1831   MachineBasicBlock::iterator Loc =
1832     findHoistingInsertPosAndDeps(MBB, TII, TRI, Uses, Defs);
1833   if (Loc == MBB->end())
1834     return false;
1835 
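  // Walk TBB and FBB in lockstep, collecting the longest prefix of
  // instructions that is identical in both blocks and safe to hoist above
  // MBB's branch.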
1836   bool HasDups = false;
1837   SmallVector<unsigned, 4> LocalDefs;
1838   SmallSet<unsigned, 4> LocalDefsSet;
1839   MachineBasicBlock::iterator TIB = TBB->begin();
1840   MachineBasicBlock::iterator FIB = FBB->begin();
1841   MachineBasicBlock::iterator TIE = TBB->end();
1842   MachineBasicBlock::iterator FIE = FBB->end();
1843   while (TIB != TIE && FIB != FIE) {
1844     // Skip dbg_value instructions. These do not count.
1845     TIB = skipDebugInstructionsForward(TIB, TIE);
1846     FIB = skipDebugInstructionsForward(FIB, FIE);
1847     if (TIB == TIE || FIB == FIE)
1848       break;
1849 
1850     if (!TIB->isIdenticalTo(*FIB, MachineInstr::CheckKillDead))
1851       break;
1852 
1853     if (TII->isPredicated(*TIB))
      // Hard to reason about register liveness with predicated instructions.
1855       break;
1856 
1857     bool IsSafe = true;
1858     for (MachineOperand &MO : TIB->operands()) {
1859       // Don't attempt to hoist instructions with register masks.
1860       if (MO.isRegMask()) {
1861         IsSafe = false;
1862         break;
1863       }
1864       if (!MO.isReg())
1865         continue;
1866       unsigned Reg = MO.getReg();
1867       if (!Reg)
1868         continue;
1869       if (MO.isDef()) {
1870         if (Uses.count(Reg)) {
1871           // Avoid clobbering a register that's used by the instruction at
1872           // the point of insertion.
1873           IsSafe = false;
1874           break;
1875         }
1876 
1877         if (Defs.count(Reg) && !MO.isDead()) {
          // Don't hoist the instruction if its def would be clobbered by the
          // instruction at the point of insertion. FIXME: This is overly
1880           // conservative. It should be possible to hoist the instructions
1881           // in BB2 in the following example:
1882           // BB1:
1883           // r1, eflag = op1 r2, r3
1884           // brcc eflag
1885           //
1886           // BB2:
1887           // r1 = op2, ...
1888           //    = op3, r1<kill>
1889           IsSafe = false;
1890           break;
1891         }
1892       } else if (!LocalDefsSet.count(Reg)) {
1893         if (Defs.count(Reg)) {
1894           // Use is defined by the instruction at the point of insertion.
1895           IsSafe = false;
1896           break;
1897         }
1898 
1899         if (MO.isKill() && Uses.count(Reg))
1900           // Kills a register that's read by the instruction at the point of
1901           // insertion. Remove the kill marker.
1902           MO.setIsKill(false);
1903       }
1904     }
1905     if (!IsSafe)
1906       break;
1907 
1908     bool DontMoveAcrossStore = true;
1909     if (!TIB->isSafeToMove(nullptr, DontMoveAcrossStore))
1910       break;
1911 
1912     // Remove kills from LocalDefsSet, these registers had short live ranges.
1913     for (const MachineOperand &MO : TIB->operands()) {
1914       if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1915         continue;
1916       unsigned Reg = MO.getReg();
1917       if (!Reg || !LocalDefsSet.count(Reg))
1918         continue;
1919       if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1920         for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
1921           LocalDefsSet.erase(*AI);
1922       } else {
1923         LocalDefsSet.erase(Reg);
1924       }
1925     }
1926 
    // Track local defs so we can update live-ins.
1928     for (const MachineOperand &MO : TIB->operands()) {
1929       if (!MO.isReg() || !MO.isDef() || MO.isDead())
1930         continue;
1931       unsigned Reg = MO.getReg();
1932       if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg))
1933         continue;
1934       LocalDefs.push_back(Reg);
1935       addRegAndItsAliases(Reg, TRI, LocalDefsSet);
1936     }
1937 
1938     HasDups = true;
1939     ++TIB;
1940     ++FIB;
1941   }
1942 
1943   if (!HasDups)
1944     return false;
1945 
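  // Move the common prefix from TBB up into MBB at the insertion point, and
  // erase the duplicate copy from FBB.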
1946   MBB->splice(Loc, TBB, TBB->begin(), TIB);
1947   FBB->erase(FBB->begin(), FIB);
1948 
  // Update live-ins.
1950   bool AddedLiveIns = false;
1951   for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
1952     unsigned Def = LocalDefs[i];
1953     if (LocalDefsSet.count(Def)) {
1954       TBB->addLiveIn(Def);
1955       FBB->addLiveIn(Def);
1956       AddedLiveIns = true;
1957     }
1958   }
1959 
1960   if (AddedLiveIns) {
1961     TBB->sortUniqueLiveIns();
1962     FBB->sortUniqueLiveIns();
1963   }
1964 
1965   ++NumHoist;
1966   return true;
1967 }
1968