//===-- ProfiledBinary.h - Binary decoder -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TOOLS_LLVM_PROFGEN_PROFILEDBINARY_H
#define LLVM_TOOLS_LLVM_PROFGEN_PROFILEDBINARY_H

#include "CallContext.h"
#include "ErrorHandling.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/Symbolize/Symbolize.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCPseudoProbe.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Path.h"
#include "llvm/Transforms/IPO/SampleContextTracker.h"
#include <list>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

extern cl::opt<bool> EnableCSPreInliner;
extern cl::opt<bool> UseContextCostForPreInliner;

using namespace llvm;
using namespace sampleprof;
using namespace llvm::object;

namespace llvm {
namespace sampleprof {

class ProfiledBinary;

struct InstructionPointer {
  const ProfiledBinary *Binary;
  union {
    // Offset of the executable segment of the binary.
    uint64_t Offset = 0;
    // Also used as the address in the unwinder.
    uint64_t Address;
  };
  // Index into the sorted code address array of the binary.
  uint64_t Index = 0;
  InstructionPointer(const ProfiledBinary *Binary, uint64_t Address,
                     bool RoundToNext = false);
  bool advance();
  bool backward();
  void update(uint64_t Addr);
};
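
// A minimal usage sketch (illustrative only; Binary, Addr and visit are
// hypothetical placeholders, not declarations from this header):
//
//   InstructionPointer IP(Binary, Addr, /*RoundToNext=*/true);
//   do {
//     visit(IP.Offset); // IP.Index is its position in the sorted array.
//   } while (IP.advance());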

// The special frame addresses.
enum SpecialFrameAddr {
  // Dummy root of the frame trie.
  DummyRoot = 0,
  // Represents all addresses outside of the current binary.
  // This is also used to indicate that the call stack should be truncated,
  // since this isn't a real call context the compiler will see.
  ExternalAddr = 1,
};

using RangesTy = std::vector<std::pair<uint64_t, uint64_t>>;

struct BinaryFunction {
  StringRef FuncName;
  // End of range is an exclusive bound.
  RangesTy Ranges;

  uint64_t getFuncSize() {
    uint64_t Sum = 0;
    for (auto &R : Ranges) {
      Sum += R.second - R.first;
    }
    return Sum;
  }
};
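
// For example, a function split into Ranges {[0x0, 0x10), [0x40, 0x48)}
// has getFuncSize() == 0x10 + 0x8 == 0x18 bytes.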

// Info about a function range. A function can be split into multiple
// non-contiguous ranges; each range corresponds to one FuncRange.
struct FuncRange {
  uint64_t StartOffset;
  // EndOffset is an exclusive bound.
  uint64_t EndOffset;
  // Function the range belongs to.
  BinaryFunction *Func;
  // Whether the start offset is the real entry of the function.
  bool IsFuncEntry = false;

  StringRef getFuncName() { return Func->FuncName; }
};

// PrologEpilog offset tracker, used to filter out broken stack samples.
// Currently we use a heuristic size (two) to infer the prolog and epilog
// based on the start address and return address. In the future, we will
// switch to a DWARF CFI-based tracker.
struct PrologEpilogTracker {
  // A set of prolog and epilog offsets. Used by virtual unwinding.
  std::unordered_set<uint64_t> PrologEpilogSet;
  ProfiledBinary *Binary;
  PrologEpilogTracker(ProfiledBinary *Bin) : Binary(Bin) {}

  // Take the first two addresses of each function as its prolog.
  void inferPrologOffsets(std::map<uint64_t, FuncRange> &FuncStartOffsetMap) {
    for (auto I : FuncStartOffsetMap) {
      PrologEpilogSet.insert(I.first);
      InstructionPointer IP(Binary, I.first);
      if (!IP.advance())
        break;
      PrologEpilogSet.insert(IP.Offset);
    }
  }

  // Take each return address and the address preceding it as the epilog.
  void inferEpilogOffsets(std::unordered_set<uint64_t> &RetAddrs) {
    for (auto Addr : RetAddrs) {
      PrologEpilogSet.insert(Addr);
      InstructionPointer IP(Binary, Addr);
      if (!IP.backward())
        break;
      PrologEpilogSet.insert(IP.Offset);
    }
  }
};
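
// Illustration of the two-address heuristic with made-up offsets: for a
// function starting at offset 0x100 whose first instruction is 4 bytes,
// inferPrologOffsets records {0x100, 0x104}; for a return instruction at
// offset 0x1f8 whose predecessor starts at 0x1f4, inferEpilogOffsets
// records {0x1f8, 0x1f4}.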

// Track function byte size under different contexts (the outlined version as
// well as various inlined versions). It also provides query support to get
// function size with the best matching context, which is used to help the
// pre-inliner use accurate post-optimization size to make decisions.
// TODO: If an inlinee is completely optimized away, ideally we should have
// zero for its context size; currently we would miss such a context since it
// doesn't have instructions. To fix this, we need to mark all inlinees with
// an entry probe but without instructions as having zero size.
class BinarySizeContextTracker {
public:
  // Add an instruction with the given size to a context.
  void addInstructionForContext(const SampleContextFrameVector &Context,
                                uint32_t InstrSize);

  // Get function size with a specific context. When there's no exact match
  // for the given context, try to retrieve the size of that function from
  // the closest matching context.
  uint32_t getFuncSizeForContext(const ContextTrieNode *Context);

  // For inlinees that are fully optimized away, we can establish zero size
  // using their remaining probes.
  void trackInlineesOptimizedAway(MCPseudoProbeDecoder &ProbeDecoder);

  using ProbeFrameStack = SmallVector<std::pair<StringRef, uint32_t>>;
  void trackInlineesOptimizedAway(MCPseudoProbeDecoder &ProbeDecoder,
                                  MCDecodedPseudoProbeInlineTree &ProbeNode,
                                  ProbeFrameStack &Context);

  void dump() { RootContext.dumpTree(); }

private:
  // Root node for the context trie; note that this is a reverse context trie
  // with the callee as parent and the caller as child. This way we can
  // traverse from the root to find the best/longest matching context if an
  // exact match does not exist. It gives us the best possible estimate for a
  // function's post-inline, post-optimization byte size.
  ContextTrieNode RootContext;
};
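
// Illustration of the reverse trie layout with made-up names: the inline
// context "main @1 -> foo @2 -> bar" is stored along the path
// RootContext -> bar -> foo -> main. A query for "baz @3 -> foo @2 -> bar"
// then descends from the root, matches "bar" and "foo", and stops at the
// "baz"/"main" mismatch, yielding the closest-context size estimate.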

using OffsetRange = std::pair<uint64_t, uint64_t>;

class ProfiledBinary {
  // Absolute path of the executable binary.
  std::string Path;
  // Path of the debug info binary.
  std::string DebugBinaryPath;
  // Path used by the symbolizer; it should point to the binary with debug
  // info.
  StringRef SymbolizerPath;
  // The target triple.
  Triple TheTriple;
  // The runtime base address that the first executable segment is loaded at.
  uint64_t BaseAddress = 0;
  // The runtime base address that the first loadable segment is loaded at.
  uint64_t FirstLoadableAddress = 0;
  // The preferred load address of each executable segment.
  std::vector<uint64_t> PreferredTextSegmentAddresses;
  // The file offset of each executable segment.
  std::vector<uint64_t> TextSegmentOffsets;

  // MC components used for disassembling.
  std::unique_ptr<const MCRegisterInfo> MRI;
  std::unique_ptr<const MCAsmInfo> AsmInfo;
  std::unique_ptr<const MCSubtargetInfo> STI;
  std::unique_ptr<const MCInstrInfo> MII;
  std::unique_ptr<MCDisassembler> DisAsm;
  std::unique_ptr<const MCInstrAnalysis> MIA;
  std::unique_ptr<MCInstPrinter> IPrinter;
  // A list of text sections sorted by start RVA and size. Used to check
  // if a given RVA is a valid code address.
  std::set<std::pair<uint64_t, uint64_t>> TextSections;

  // A map from function name to BinaryFunction info.
  std::unordered_map<std::string, BinaryFunction> BinaryFunctions;

  // A list of binary functions that have samples.
  std::unordered_set<const BinaryFunction *> ProfiledFunctions;

  // An ordered map from a function's start offset to its function range
  // info. Currently, to determine if an offset in the ELF is the start of a
  // real function, we leverage the function range info from DWARF.
  std::map<uint64_t, FuncRange> StartOffset2FuncRangeMap;

  // Offset to context location map. Used to expand the context.
  std::unordered_map<uint64_t, SampleContextFrameVector> Offset2LocStackMap;

  // Offset to instruction size map. Also used for quick offset lookup.
  std::unordered_map<uint64_t, uint64_t> Offset2InstSizeMap;

  // An array of offsets of all instructions sorted in increasing order. The
  // sorting is needed to quickly advance to the next forward/backward
  // instruction.
  std::vector<uint64_t> CodeAddrOffsets;
  // A set of call instruction offsets. Used by virtual unwinding.
  std::unordered_set<uint64_t> CallOffsets;
  // A set of return instruction offsets. Used by virtual unwinding.
  std::unordered_set<uint64_t> RetOffsets;
  // An ordered set of unconditional branch instruction offsets.
  std::set<uint64_t> UncondBranchOffsets;
  // A set of branch instruction offsets.
  std::unordered_set<uint64_t> BranchOffsets;

  // Estimate and track function prolog and epilog ranges.
  PrologEpilogTracker ProEpilogTracker;

  // Track function sizes under different contexts.
  BinarySizeContextTracker FuncSizeTracker;

  // The symbolizer used to get the inline context for an instruction.
  std::unique_ptr<symbolize::LLVMSymbolizer> Symbolizer;

  // String table owning function name strings created from the symbolizer.
  std::unordered_set<std::string> NameStrings;

  // A collection of functions to print disassembly for.
  StringSet<> DisassembleFunctionSet;

  // Pseudo probe decoder.
  MCPseudoProbeDecoder ProbeDecoder;

  // Function name to probe frame map for top-level outlined functions.
  StringMap<MCDecodedPseudoProbeInlineTree *> TopLevelProbeFrameMap;

  bool UsePseudoProbes = false;

  bool UseFSDiscriminator = false;

  // Whether we need to symbolize all instructions to get function context
  // size.
  bool TrackFuncContextSize = false;

  // Indicates whether the base loading address was parsed from the mmap
  // event or falls back to the preferred address.
  bool IsLoadedByMMap = false;
  // Used to avoid redundant warnings.
  bool MissingMMapWarned = false;

  void setPreferredTextSegmentAddresses(const ELFObjectFileBase *O);

  template <class ELFT>
  void setPreferredTextSegmentAddresses(const ELFFile<ELFT> &Obj,
                                        StringRef FileName);

  void checkPseudoProbe(const ELFObjectFileBase *Obj);

  void decodePseudoProbe(const ELFObjectFileBase *Obj);

  void
  checkUseFSDiscriminator(const ELFObjectFileBase *Obj,
                          std::map<SectionRef, SectionSymbolsTy> &AllSymbols);

  // Set up disassembler and related components.
  void setUpDisassembler(const ELFObjectFileBase *Obj);
  void setupSymbolizer();

  // Load debug info of subprograms from the DWARF section.
  void loadSymbolsFromDWARF(ObjectFile &Obj);

  // Load debug info from a DWARF unit.
  void loadSymbolsFromDWARFUnit(DWARFUnit &CompilationUnit);

  // A function may be split into multiple non-contiguous address ranges. We
  // use this to set whether the start offset of a function is the real entry
  // of the function, and also to set it to false for non-function labels.
  void setIsFuncEntry(uint64_t Offset, StringRef RangeSymName);

  // Warn if no entry range exists in the function.
  void warnNoFuncEntry();

  /// Disassemble the text section and build various address maps.
  void disassemble(const ELFObjectFileBase *O);

  /// Helper function to disassemble a symbol and extract info for unwinding.
  bool dissassembleSymbol(std::size_t SI, ArrayRef<uint8_t> Bytes,
                          SectionSymbolsTy &Symbols, const SectionRef &Section);
  /// Symbolize a given instruction pointer and return a full call context.
  SampleContextFrameVector symbolize(const InstructionPointer &IP,
                                     bool UseCanonicalFnName = false,
                                     bool UseProbeDiscriminator = false);
  /// Decode the interesting parts of the binary and build internal data
  /// structures. At a high level, the parts of interest are:
  ///   1. Text sections, including the main code section and the PLT
  ///   entries that will be used to handle cross-module call transitions.
  ///   2. The .debug_line section, used by DWARF-based profile generation.
  ///   3. Pseudo probe related sections, used by probe-based profile
  ///   generation.
  void load();

public:
  ProfiledBinary(const StringRef ExeBinPath, const StringRef DebugBinPath)
      : Path(ExeBinPath), DebugBinaryPath(DebugBinPath), ProEpilogTracker(this),
        TrackFuncContextSize(EnableCSPreInliner &&
                             UseContextCostForPreInliner) {
    // Point to the executable binary if a debug info binary is not specified.
    SymbolizerPath = DebugBinPath.empty() ? ExeBinPath : DebugBinPath;
    setupSymbolizer();
    load();
  }
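
  // A usage sketch (hypothetical paths and values, for illustration only):
  //
  //   ProfiledBinary Binary("/path/to/app", /*DebugBinPath=*/"");
  //   // Once the mmap base address is known from the perf trace:
  //   Binary.setBaseAddress(0x555555554000);
  //   uint64_t Offset = Binary.virtualAddrToOffset(SampleAddress);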

  void decodePseudoProbe();

  uint64_t virtualAddrToOffset(uint64_t VirtualAddress) const {
    return VirtualAddress - BaseAddress;
  }
  uint64_t offsetToVirtualAddr(uint64_t Offset) const {
    return Offset + BaseAddress;
  }
  StringRef getPath() const { return Path; }
  StringRef getName() const { return llvm::sys::path::filename(Path); }
  uint64_t getBaseAddress() const { return BaseAddress; }
  void setBaseAddress(uint64_t Address) { BaseAddress = Address; }

  // Return the preferred load address for the first executable segment.
  uint64_t getPreferredBaseAddress() const {
    return PreferredTextSegmentAddresses[0];
  }
  // Return the preferred load address for the first loadable segment.
  uint64_t getFirstLoadableAddress() const { return FirstLoadableAddress; }
  // Return the file offset for the first executable segment.
  uint64_t getTextSegmentOffset() const { return TextSegmentOffsets[0]; }
  const std::vector<uint64_t> &getPreferredTextSegmentAddresses() const {
    return PreferredTextSegmentAddresses;
  }
  const std::vector<uint64_t> &getTextSegmentOffsets() const {
    return TextSegmentOffsets;
  }

  uint64_t getInstSize(uint64_t Offset) const {
    auto I = Offset2InstSizeMap.find(Offset);
    if (I == Offset2InstSizeMap.end())
      return 0;
    return I->second;
  }

  bool offsetIsCode(uint64_t Offset) const {
    return Offset2InstSizeMap.find(Offset) != Offset2InstSizeMap.end();
  }
  bool addressIsCode(uint64_t Address) const {
    uint64_t Offset = virtualAddrToOffset(Address);
    return offsetIsCode(Offset);
  }
  bool addressIsCall(uint64_t Address) const {
    uint64_t Offset = virtualAddrToOffset(Address);
    return CallOffsets.count(Offset);
  }
  bool addressIsReturn(uint64_t Address) const {
    uint64_t Offset = virtualAddrToOffset(Address);
    return RetOffsets.count(Offset);
  }
  bool addressInPrologEpilog(uint64_t Address) const {
    uint64_t Offset = virtualAddrToOffset(Address);
    return ProEpilogTracker.PrologEpilogSet.count(Offset);
  }

  bool offsetIsTransfer(uint64_t Offset) {
    return BranchOffsets.count(Offset) || RetOffsets.count(Offset) ||
           CallOffsets.count(Offset);
  }

  bool rangeCrossUncondBranch(uint64_t Start, uint64_t End) {
    if (Start >= End)
      return false;
    auto R = UncondBranchOffsets.lower_bound(Start);
    return R != UncondBranchOffsets.end() && *R < End;
  }
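
  // Example: with UncondBranchOffsets = {0x20}, rangeCrossUncondBranch(0x10,
  // 0x30) is true because 0x20 falls inside [0x10, 0x30), while
  // rangeCrossUncondBranch(0x21, 0x30) is false.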

  uint64_t getAddressforIndex(uint64_t Index) const {
    return offsetToVirtualAddr(CodeAddrOffsets[Index]);
  }

  size_t getCodeOffsetsSize() const { return CodeAddrOffsets.size(); }

  bool usePseudoProbes() const { return UsePseudoProbes; }
  bool useFSDiscriminator() const { return UseFSDiscriminator; }
  // Get the index in CodeAddrOffsets for the given offset. Since we might be
  // given an offset that is not a valid code address, round up to the next
  // valid one via a lower-bound lookup.
  uint32_t getIndexForOffset(uint64_t Offset) const {
    auto Low = llvm::lower_bound(CodeAddrOffsets, Offset);
    return Low - CodeAddrOffsets.begin();
  }
  uint32_t getIndexForAddr(uint64_t Address) const {
    uint64_t Offset = virtualAddrToOffset(Address);
    return getIndexForOffset(Offset);
  }
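
  // Example: with CodeAddrOffsets = {0x0, 0x4, 0x8}, getIndexForOffset(0x4)
  // returns 1 (an exact match), while getIndexForOffset(0x5) returns 2,
  // rounding the non-instruction offset 0x5 up to the next code address 0x8.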

  uint64_t getCallAddrFromFrameAddr(uint64_t FrameAddr) const {
    if (FrameAddr == ExternalAddr)
      return ExternalAddr;
    auto I = getIndexForAddr(FrameAddr);
    FrameAddr = I ? getAddressforIndex(I - 1) : 0;
    if (FrameAddr && addressIsCall(FrameAddr))
      return FrameAddr;
    return 0;
  }
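
  // Example with made-up addresses: a frame address taken from a call stack
  // is a return address, so the matching call is the instruction right
  // before it. If a call at 0x40050b is followed by the instruction at
  // 0x400510, getCallAddrFromFrameAddr(0x400510) returns 0x40050b.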

  FuncRange *findFuncRangeForStartOffset(uint64_t Offset) {
    auto I = StartOffset2FuncRangeMap.find(Offset);
    if (I == StartOffset2FuncRangeMap.end())
      return nullptr;
    return &I->second;
  }

  // Binary search for the function range that includes the given offset.
  FuncRange *findFuncRangeForOffset(uint64_t Offset) {
    auto I = StartOffset2FuncRangeMap.upper_bound(Offset);
    if (I == StartOffset2FuncRangeMap.begin())
      return nullptr;
    I--;

    if (Offset >= I->second.EndOffset)
      return nullptr;

    return &I->second;
  }
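
  // Example: with StartOffset2FuncRangeMap = {0x0 -> [0x0, 0x20), 0x40 ->
  // [0x40, 0x60)}, findFuncRangeForOffset(0x10) returns the first range,
  // while findFuncRangeForOffset(0x30) returns nullptr because 0x30 falls in
  // the gap past EndOffset 0x20.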

  // Get all ranges of one function.
  RangesTy getRangesForOffset(uint64_t Offset) {
    auto *FRange = findFuncRangeForOffset(Offset);
    // Ignore the range if it falls into the PLT section or a system library.
    if (!FRange)
      return RangesTy();

    return FRange->Func->Ranges;
  }

  const std::unordered_map<std::string, BinaryFunction> &
  getAllBinaryFunctions() {
    return BinaryFunctions;
  }

  std::unordered_set<const BinaryFunction *> &getProfiledFunctions() {
    return ProfiledFunctions;
  }

  void setProfiledFunctions(std::unordered_set<const BinaryFunction *> &Funcs) {
    ProfiledFunctions = Funcs;
  }

  BinaryFunction *getBinaryFunction(StringRef FName) {
    auto I = BinaryFunctions.find(FName.str());
    if (I == BinaryFunctions.end())
      return nullptr;
    return &I->second;
  }

  uint32_t getFuncSizeForContext(const ContextTrieNode *ContextNode) {
    return FuncSizeTracker.getFuncSizeForContext(ContextNode);
  }

  // Load the symbols from the debug table and populate them into the symbol
  // list.
  void populateSymbolListFromDWARF(ProfileSymbolList &SymbolList);

  const SampleContextFrameVector &
  getFrameLocationStack(uint64_t Offset, bool UseProbeDiscriminator = false) {
    auto I = Offset2LocStackMap.emplace(Offset, SampleContextFrameVector());
    if (I.second) {
      InstructionPointer IP(this, Offset);
      I.first->second = symbolize(IP, true, UseProbeDiscriminator);
    }
    return I.first->second;
  }
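
  // Note that getFrameLocationStack memoizes: the first query for an offset
  // symbolizes the instruction and caches the frame stack in
  // Offset2LocStackMap, so repeated queries for hot offsets avoid
  // re-symbolization.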

  Optional<SampleContextFrame> getInlineLeafFrameLoc(uint64_t Offset) {
    const auto &Stack = getFrameLocationStack(Offset);
    if (Stack.empty())
      return {};
    return Stack.back();
  }

  void flushSymbolizer() { Symbolizer.reset(); }

  // Compare the inline contexts of two addresses.
  bool inlineContextEqual(uint64_t Add1, uint64_t Add2);

  // Get the full context of the current stack with inline context filled in.
  // It will search the disassembly info stored in Offset2LocStackMap. This is
  // used as the key of the function sample map.
  SampleContextFrameVector
  getExpandedContext(const SmallVectorImpl<uint64_t> &Stack,
                     bool &WasLeafInlined);
  // Go through the instructions in the given range and record their sizes
  // for the inline context.
  void computeInlinedContextSizeForRange(uint64_t StartOffset,
                                         uint64_t EndOffset);

  void computeInlinedContextSizeForFunc(const BinaryFunction *Func);

  const MCDecodedPseudoProbe *getCallProbeForAddr(uint64_t Address) const {
    return ProbeDecoder.getCallProbeForAddr(Address);
  }

  void getInlineContextForProbe(const MCDecodedPseudoProbe *Probe,
                                SampleContextFrameVector &InlineContextStack,
                                bool IncludeLeaf = false) const {
    SmallVector<MCPseduoProbeFrameLocation, 16> ProbeInlineContext;
    ProbeDecoder.getInlineContextForProbe(Probe, ProbeInlineContext,
                                          IncludeLeaf);
    for (uint32_t I = 0; I < ProbeInlineContext.size(); I++) {
      auto &Callsite = ProbeInlineContext[I];
      // Clear the current context for an unknown probe.
      if (Callsite.second == 0 && I != ProbeInlineContext.size() - 1) {
        InlineContextStack.clear();
        continue;
      }
      InlineContextStack.emplace_back(Callsite.first,
                                      LineLocation(Callsite.second, 0));
    }
  }
  const AddressProbesMap &getAddress2ProbesMap() const {
    return ProbeDecoder.getAddress2ProbesMap();
  }
  const MCPseudoProbeFuncDesc *getFuncDescForGUID(uint64_t GUID) {
    return ProbeDecoder.getFuncDescForGUID(GUID);
  }

  const MCPseudoProbeFuncDesc *
  getInlinerDescForProbe(const MCDecodedPseudoProbe *Probe) {
    return ProbeDecoder.getInlinerDescForProbe(Probe);
  }

  bool getTrackFuncContextSize() { return TrackFuncContextSize; }

  bool getIsLoadedByMMap() { return IsLoadedByMMap; }

  void setIsLoadedByMMap(bool Value) { IsLoadedByMMap = Value; }

  bool getMissingMMapWarned() { return MissingMMapWarned; }

  void setMissingMMapWarned(bool Value) { MissingMMapWarned = Value; }
};

} // end namespace sampleprof
} // end namespace llvm

#endif