1 //===- bolt/Passes/SplitFunctions.cpp - Pass for splitting function code --===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the SplitFunctions pass.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "bolt/Passes/SplitFunctions.h"
14 #include "bolt/Core/BinaryFunction.h"
15 #include "bolt/Core/ParallelUtilities.h"
16 #include "llvm/Support/CommandLine.h"
17 #include "llvm/Support/FormatVariadic.h"
18 #include <algorithm>
19 #include <random>
20 #include <vector>
21 
22 #define DEBUG_TYPE "bolt-opts"
23 
24 using namespace llvm;
25 using namespace bolt;
26 
27 namespace {
28 class DeprecatedSplitFunctionOptionParser : public cl::parser<bool> {
29 public:
30   explicit DeprecatedSplitFunctionOptionParser(cl::Option &O)
31       : cl::parser<bool>(O) {}
32 
33   bool parse(cl::Option &O, StringRef ArgName, StringRef Arg, bool &Value) {
34     if (Arg == "2" || Arg == "3") {
35       Value = true;
36       errs() << formatv("BOLT-WARNING: specifying non-boolean value \"{0}\" "
37                         "for option -{1} is deprecated\n",
38                         Arg, ArgName);
39       return false;
40     }
41     return cl::parser<bool>::parse(O, ArgName, Arg, Value);
42   }
43 };
44 } // namespace
45 
namespace opts {

extern cl::OptionCategory BoltOptCategory;

// Options declared elsewhere that this pass consults.
extern cl::opt<bool> SplitEH;
extern cl::opt<unsigned> ExecutionCountThreshold;
extern cl::opt<uint32_t> RandomSeed;

// When set, outline every zero-count block, even blocks interleaved between
// hot ones (see the stable_sort in splitFunction()).
static cl::opt<bool> AggressiveSplitting(
    "split-all-cold", cl::desc("outline as many cold basic blocks as possible"),
    cl::cat(BoltOptCategory));

static cl::opt<unsigned> SplitAlignThreshold(
    "split-align-threshold",
    cl::desc("when deciding to split a function, apply this alignment "
             "while doing the size comparison (see -split-threshold). "
             "Default value: 2."),
    cl::init(2),

    cl::Hidden, cl::cat(BoltOptCategory));

// Uses DeprecatedSplitFunctionOptionParser so legacy "2"/"3" values still
// parse (as "true", with a warning).
static cl::opt<bool, false, DeprecatedSplitFunctionOptionParser>
    SplitFunctions("split-functions",
                   cl::desc("split functions into hot and cold regions"),
                   cl::cat(BoltOptCategory));

static cl::opt<unsigned> SplitThreshold(
    "split-threshold",
    cl::desc("split function only if its main size is reduced by more than "
             "given amount of bytes. Default value: 0, i.e. split iff the "
             "size is reduced. Note that on some architectures the size can "
             "increase after splitting."),
    cl::init(0), cl::Hidden, cl::cat(BoltOptCategory));

// Hidden knob: pick the split point at random instead of from the profile
// (forces sequential processing in runOnFunctions() for determinism).
static cl::opt<bool>
    RandomSplit("split-random",
                cl::desc("split functions randomly into hot/cold regions"),
                cl::Hidden);
} // namespace opts
85 
86 namespace {
87 struct SplitCold {
88   bool canSplit(const BinaryFunction &BF) {
89     if (!BF.hasValidProfile())
90       return false;
91 
92     bool AllCold = true;
93     for (const BinaryBasicBlock &BB : BF) {
94       const uint64_t ExecCount = BB.getExecutionCount();
95       if (ExecCount == BinaryBasicBlock::COUNT_NO_PROFILE)
96         return false;
97       if (ExecCount != 0)
98         AllCold = false;
99     }
100 
101     return !AllCold;
102   }
103 
104   bool canOutline(const BinaryBasicBlock &BB) {
105     return BB.getExecutionCount() == 0;
106   }
107 
108   void partition(BinaryFunction::reverse_order_iterator Start,
109                  BinaryFunction::reverse_order_iterator End) const {
110     for (auto I = Start; I != End; ++I) {
111       BinaryBasicBlock *BB = *I;
112       if (!BB->canOutline())
113         break;
114       BB->setIsCold(true);
115     }
116   }
117 };
118 
struct SplitRandom {
  // Non-owning pointer to a shared generator so all functions draw from one
  // deterministic sequence (seeded from opts::RandomSeed by the caller).
  std::minstd_rand0 *Gen;

  explicit SplitRandom(std::minstd_rand0 &Gen) : Gen(&Gen) {}

  // Every function and block is eligible; the random choice happens in
  // partition().
  bool canSplit(const BinaryFunction &BF) { return true; }
  bool canOutline(const BinaryBasicBlock &BB) { return true; }

  // Mark a randomly-sized prefix of [Start, End) — i.e. a suffix of the
  // layout, since these are reverse iterators — as cold.
  void partition(BinaryFunction::reverse_order_iterator Start,
                 BinaryFunction::reverse_order_iterator End) const {
    using It = decltype(Start);

    const It OutlineableBegin = Start;
    const It OutlineableEnd =
        std::find_if(OutlineableBegin, End,
                     [](BinaryBasicBlock *BB) { return !BB->canOutline(); });
    const It::difference_type NumOutlineableBlocks =
        OutlineableEnd - OutlineableBegin;

    // We want to split at least one block unless there are no blocks that can
    // be outlined.
    const auto MinimumSplit =
        std::min<It::difference_type>(NumOutlineableBlocks, 1);
    std::uniform_int_distribution<It::difference_type> Dist(
        MinimumSplit, NumOutlineableBlocks);
    const It::difference_type NumColdBlocks = Dist(*Gen);
    const It ColdEnd = OutlineableBegin + NumColdBlocks;

    LLVM_DEBUG(dbgs() << formatv("BOLT-DEBUG: randomly chose last {0} (out of "
                                 "{1} possible) blocks to split\n",
                                 ColdEnd - OutlineableBegin,
                                 OutlineableEnd - OutlineableBegin));

    std::for_each(OutlineableBegin, ColdEnd,
                  [](BinaryBasicBlock *BB) { BB->setIsCold(true); });
  }
};
156 } // namespace
157 
158 namespace llvm {
159 namespace bolt {
160 
161 bool SplitFunctions::shouldOptimize(const BinaryFunction &BF) const {
162   // Apply execution count threshold
163   if (BF.getKnownExecutionCount() < opts::ExecutionCountThreshold)
164     return false;
165 
166   return BinaryFunctionPass::shouldOptimize(BF);
167 }
168 
169 void SplitFunctions::runOnFunctions(BinaryContext &BC) {
170   if (!opts::SplitFunctions)
171     return;
172 
173   ParallelUtilities::WorkFuncTy WorkFun;
174   std::minstd_rand0 RandGen(opts::RandomSeed.getValue());
175   if (opts::RandomSplit)
176     WorkFun = [&](BinaryFunction &BF) {
177       splitFunction(BF, SplitRandom(RandGen));
178     };
179   else
180     WorkFun = [&](BinaryFunction &BF) { splitFunction<SplitCold>(BF); };
181 
182   ParallelUtilities::PredicateTy SkipFunc = [&](const BinaryFunction &BF) {
183     return !shouldOptimize(BF);
184   };
185 
186   // If we split functions randomly, we need to ensure that across runs with the
187   // same input, we generate random numbers for each function in the same order.
188   const bool ForceSequential = opts::RandomSplit;
189 
190   ParallelUtilities::runOnEachFunction(
191       BC, ParallelUtilities::SchedulingPolicy::SP_BB_LINEAR, WorkFun, SkipFunc,
192       "SplitFunctions", ForceSequential);
193 
194   if (SplitBytesHot + SplitBytesCold > 0)
195     outs() << "BOLT-INFO: splitting separates " << SplitBytesHot
196            << " hot bytes from " << SplitBytesCold << " cold bytes "
197            << format("(%.2lf%% of split functions is hot).\n",
198                      100.0 * SplitBytesHot / (SplitBytesHot + SplitBytesCold));
199 }
200 
// Split one function using the given strategy: decide which layout-tail
// blocks become cold, mark them, create EH trampolines if landing pads were
// separated from their throwers, and — on x86 only — verify the split
// actually shrinks the hot fragment, reverting it otherwise.
template <typename SplitStrategy>
void SplitFunctions::splitFunction(BinaryFunction &BF, SplitStrategy Strategy) {
  if (BF.empty())
    return;

  if (!Strategy.canSplit(BF))
    return;

  // Remember the current layout so the split can be undone below.
  BinaryFunction::BasicBlockOrderType PreSplitLayout = BF.getLayout();

  BinaryContext &BC = BF.getBinaryContext();
  // These sizes are written and read only under BC.isX86() guards; on other
  // targets they intentionally remain unused (and uninitialized).
  size_t OriginalHotSize;
  size_t HotSize;
  size_t ColdSize;
  if (BC.isX86()) {
    std::tie(OriginalHotSize, ColdSize) = BC.calculateEmittedSize(BF);
    LLVM_DEBUG(dbgs() << "Estimated size for function " << BF
                      << " pre-split is <0x"
                      << Twine::utohexstr(OriginalHotSize) << ", 0x"
                      << Twine::utohexstr(ColdSize) << ">\n");
  }

  // Never outline the first basic block.
  BF.layout_front()->setCanOutline(false);
  for (BinaryBasicBlock *BB : BF.layout()) {
    if (!BB->canOutline())
      continue;
    if (!Strategy.canOutline(*BB)) {
      BB->setCanOutline(false);
      continue;
    }
    // Do not split extra entry points in aarch64. They can be referred by
    // using ADRs and when this happens, these blocks cannot be placed far
    // away due to the limited range in ADR instruction.
    if (BC.isAArch64() && BB->isEntryPoint()) {
      BB->setCanOutline(false);
      continue;
    }

    if (BF.hasEHRanges() && !opts::SplitEH) {
      // We cannot move landing pads (or rather entry points for landing pads).
      if (BB->isLandingPad()) {
        BB->setCanOutline(false);
        continue;
      }
      // We cannot move a block that can throw since exception-handling
      // runtime cannot deal with split functions. However, if we can guarantee
      // that the block never throws, it is safe to move the block to
      // decrease the size of the function.
      for (MCInst &Instr : *BB) {
        if (BC.MIB->isInvoke(Instr)) {
          BB->setCanOutline(false);
          break;
        }
      }
    }
  }

  if (opts::AggressiveSplitting) {
    // All blocks with 0 count that we can move go to the end of the function.
    // Even if they were natural to cluster formation and were seen in-between
    // hot basic blocks.
    llvm::stable_sort(BF.layout(),
                      [&](BinaryBasicBlock *A, BinaryBasicBlock *B) {
                        return A->canOutline() < B->canOutline();
                      });
  } else if (BF.hasEHRanges() && !opts::SplitEH) {
    // Typically functions with exception handling have landing pads at the end.
    // We cannot move beginning of landing pads, but we can move 0-count blocks
    // comprising landing pads to the end and thus facilitate splitting.
    auto FirstLP = BF.layout_begin();
    while ((*FirstLP)->isLandingPad())
      ++FirstLP;

    // Stable sort keeps the relative order of blocks with equal
    // outlineability, so the hot layout is otherwise preserved.
    std::stable_sort(FirstLP, BF.layout_end(),
                     [&](BinaryBasicBlock *A, BinaryBasicBlock *B) {
                       return A->canOutline() < B->canOutline();
                     });
  }

  // Separate hot from cold starting from the bottom.
  Strategy.partition(BF.layout_rbegin(), BF.layout_rend());

  // For shared objects, invoke instructions and corresponding landing pads
  // have to be placed in the same fragment. When we split them, create
  // trampoline landing pads that will redirect the execution to real LPs.
  TrampolineSetType Trampolines;
  if (!BC.HasFixedLoadAddress && BF.hasEHRanges() && BF.isSplit())
    Trampolines = createEHTrampolines(BF);

  // Check the new size to see if it's worth splitting the function.
  if (BC.isX86() && BF.isSplit()) {
    std::tie(HotSize, ColdSize) = BC.calculateEmittedSize(BF);
    LLVM_DEBUG(dbgs() << "Estimated size for function " << BF
                      << " post-split is <0x" << Twine::utohexstr(HotSize)
                      << ", 0x" << Twine::utohexstr(ColdSize) << ">\n");
    if (alignTo(OriginalHotSize, opts::SplitAlignThreshold) <=
        alignTo(HotSize, opts::SplitAlignThreshold) + opts::SplitThreshold) {
      // The split did not reduce the (aligned) hot size by more than
      // -split-threshold bytes: undo it.
      LLVM_DEBUG(dbgs() << "Reversing splitting of function " << BF << ":\n  0x"
                        << Twine::utohexstr(HotSize) << ", 0x"
                        << Twine::utohexstr(ColdSize) << " -> 0x"
                        << Twine::utohexstr(OriginalHotSize) << '\n');

      // Reverse the action of createEHTrampolines(). The trampolines will be
      // placed immediately before the matching destination resulting in no
      // extra code.
      if (PreSplitLayout.size() != BF.size())
        PreSplitLayout = mergeEHTrampolines(BF, PreSplitLayout, Trampolines);

      BF.updateBasicBlockLayout(PreSplitLayout);
      for (BinaryBasicBlock &BB : BF)
        BB.setIsCold(false);
    } else {
      SplitBytesHot += HotSize;
      SplitBytesCold += ColdSize;
    }
  }
}
319 
// For every invoke whose landing pad ended up in the other fragment, create a
// trampoline block in the thrower's fragment whose sole successor is the real
// landing pad (the actual jump is added later by fixBranches()), and retarget
// the invoke's EH info at the trampoline. Returns the mapping from real
// landing-pad label to trampoline label; empty when no invoke/landing-pad
// pair was separated by the split.
SplitFunctions::TrampolineSetType
SplitFunctions::createEHTrampolines(BinaryFunction &BF) const {
  const auto &MIB = BF.getBinaryContext().MIB;

  // Map real landing pads to the corresponding trampolines.
  TrampolineSetType LPTrampolines;

  // Iterate over the copy of basic blocks since we are adding new blocks to the
  // function which will invalidate its iterators.
  std::vector<BinaryBasicBlock *> Blocks(BF.pbegin(), BF.pend());
  for (BinaryBasicBlock *BB : Blocks) {
    for (MCInst &Instr : *BB) {
      const Optional<MCPlus::MCLandingPad> EHInfo = MIB->getEHInfo(Instr);
      if (!EHInfo || !EHInfo->first)
        continue;

      const MCSymbol *LPLabel = EHInfo->first;
      BinaryBasicBlock *LPBlock = BF.getBasicBlockForLabel(LPLabel);
      // Only cross-fragment invoke/landing-pad pairs need a trampoline.
      if (BB->isCold() == LPBlock->isCold())
        continue;

      const MCSymbol *TrampolineLabel = nullptr;
      auto Iter = LPTrampolines.find(LPLabel);
      if (Iter != LPTrampolines.end()) {
        // Reuse the trampoline already created for this landing pad.
        TrampolineLabel = Iter->second;
      } else {
        // Create a trampoline basic block in the same fragment as the thrower.
        // Note: there's no need to insert the jump instruction, it will be
        // added by fixBranches().
        BinaryBasicBlock *TrampolineBB = BF.addBasicBlock();
        TrampolineBB->setIsCold(BB->isCold());
        TrampolineBB->setExecutionCount(LPBlock->getExecutionCount());
        TrampolineBB->addSuccessor(LPBlock, TrampolineBB->getExecutionCount());
        TrampolineBB->setCFIState(LPBlock->getCFIState());
        TrampolineLabel = TrampolineBB->getLabel();
        LPTrampolines.insert(std::make_pair(LPLabel, TrampolineLabel));
      }

      // Substitute the landing pad with the trampoline.
      MIB->updateEHInfo(Instr,
                        MCPlus::MCLandingPad(TrampolineLabel, EHInfo->second));
    }
  }

  if (LPTrampolines.empty())
    return LPTrampolines;

  // All trampoline blocks were added to the end of the function. Place them at
  // the end of corresponding fragments.
  std::stable_sort(BF.layout_begin(), BF.layout_end(),
                   [&](BinaryBasicBlock *A, BinaryBasicBlock *B) {
                     return A->isCold() < B->isCold();
                   });

  // Conservatively introduce branch instructions.
  BF.fixBranches();

  // Update exception-handling CFG for the function.
  BF.recomputeLandingPads();

  return LPTrampolines;
}
382 
383 SplitFunctions::BasicBlockOrderType SplitFunctions::mergeEHTrampolines(
384     BinaryFunction &BF, SplitFunctions::BasicBlockOrderType &Layout,
385     const SplitFunctions::TrampolineSetType &Trampolines) const {
386   BasicBlockOrderType MergedLayout;
387   for (BinaryBasicBlock *BB : Layout) {
388     auto Iter = Trampolines.find(BB->getLabel());
389     if (Iter != Trampolines.end()) {
390       BinaryBasicBlock *LPBlock = BF.getBasicBlockForLabel(Iter->second);
391       assert(LPBlock && "Could not find matching landing pad block.");
392       MergedLayout.push_back(LPBlock);
393     }
394     MergedLayout.push_back(BB);
395   }
396 
397   return MergedLayout;
398 }
399 
400 } // namespace bolt
401 } // namespace llvm
402