//===--- Passes/BinaryFunctionCallGraph.cpp -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the BinaryFunctionCallGraph class.
//
//===----------------------------------------------------------------------===//

#include "bolt/Passes/BinaryFunctionCallGraph.h"
#include "bolt/Core/BinaryContext.h"
#include "bolt/Core/BinaryFunction.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Timer.h"
#include <cinttypes>
#include <stack>

#define DEBUG_TYPE "callgraph"

namespace opts {
extern llvm::cl::opt<bool> TimeOpts;
extern llvm::cl::opt<unsigned> Verbosity;
} // namespace opts

namespace llvm {
namespace bolt {

CallGraph::NodeId BinaryFunctionCallGraph::addNode(BinaryFunction *BF,
                                                   uint32_t Size,
                                                   uint64_t Samples) {
  NodeId Id = CallGraph::addNode(Size, Samples);
  assert(size_t(Id) == Funcs.size());
  Funcs.push_back(BF);
  FuncToNodeId[BF] = Id;
  assert(Funcs[Id] == BF);
  return Id;
}

// Compute a traversal order of the call graph in post-order: a function is
// appended only after all of its callees, so callees precede their callers.
// The DFS is iterative: a NEW node is re-pushed as VISITING together with its
// unvisited callees, and by the time it is popped a second time all of its
// callees have been emitted. Cycles are broken at DFS back edges, so the
// order is only approximately topological when recursion is present.
std::deque<BinaryFunction *> BinaryFunctionCallGraph::buildTraversalOrder() {
  NamedRegionTimer T1("buildcgorder", "Build cg traversal order",
                      "CG breakdown", "CG breakdown", opts::TimeOpts);
  std::deque<BinaryFunction *> TopologicalOrder;
  enum NodeStatus { NEW, VISITING, VISITED };
  std::vector<NodeStatus> NodeStatus(Funcs.size());
  std::stack<NodeId> Worklist;

  // Seed the worklist with every node so that disconnected components are
  // covered as well.
  for (BinaryFunction *Func : Funcs) {
    const NodeId Id = FuncToNodeId.at(Func);
    Worklist.push(Id);
    NodeStatus[Id] = NEW;
  }

  while (!Worklist.empty()) {
    const NodeId FuncId = Worklist.top();
    Worklist.pop();

    if (NodeStatus[FuncId] == VISITED)
      continue;

    if (NodeStatus[FuncId] == VISITING) {
      TopologicalOrder.push_back(Funcs[FuncId]);
      NodeStatus[FuncId] = VISITED;
      continue;
    }

    assert(NodeStatus[FuncId] == NEW);
    NodeStatus[FuncId] = VISITING;
    Worklist.push(FuncId);
    for (const NodeId Callee : successors(FuncId)) {
      if (NodeStatus[Callee] == VISITING || NodeStatus[Callee] == VISITED)
        continue;
      Worklist.push(Callee);
    }
  }

  return TopologicalOrder;
}

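/// Build a call graph over the functions in \p BC.
///
/// Functions for which \p Filter returns true are excluded from the graph,
/// both as callers and as call targets. If \p CgFromPerfData is set,
/// functions without a valid profile contribute edges taken directly from
/// their recorded call sites instead of from a CFG walk. \p IncludeColdCalls
/// controls whether calls made from cold basic blocks are counted.
/// \p UseFunctionHotSize and \p UseSplitHotSize select how node sizes are
/// estimated for split functions. With \p UseEdgeCounts, arcs are weighted by
/// profiled call counts where available; otherwise each processed callsite
/// contributes a weight of one. \p IgnoreRecursiveCalls drops self-calls from
/// the graph.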
BinaryFunctionCallGraph
buildCallGraph(BinaryContext &BC, CgFilterFunction Filter, bool CgFromPerfData,
               bool IncludeColdCalls, bool UseFunctionHotSize,
               bool UseSplitHotSize, bool UseEdgeCounts,
               bool IgnoreRecursiveCalls) {
  NamedRegionTimer T1("buildcg", "Callgraph construction", "CG breakdown",
                      "CG breakdown", opts::TimeOpts);
  BinaryFunctionCallGraph Cg;
  static constexpr uint64_t COUNT_NO_PROFILE =
      BinaryBasicBlock::COUNT_NO_PROFILE;

  // Compute the size of a function, using only the hot part if requested.
  auto functionSize = [&](const BinaryFunction *Function) {
    return UseFunctionHotSize && Function->isSplit()
               ? Function->estimateHotSize(UseSplitHotSize)
               : Function->estimateSize();
  };

  // Add call graph nodes.
  auto lookupNode = [&](BinaryFunction *Function) {
    const CallGraph::NodeId Id = Cg.maybeGetNodeId(Function);
    if (Id == CallGraph::InvalidId) {
      // It's ok to use the hot size here when the function is split. This is
      // because emitFunctions will emit the hot part first in the order that
      // is computed by ReorderFunctions. The cold part will be emitted with
      // the rest of the cold functions and code.
      const size_t Size = functionSize(Function);
      // NOTE: for functions without a profile, we set the number of samples
      // to zero. This will keep these functions from appearing in the hot
      // section. This is a little weird because we wouldn't be trying to
      // create a node for a function unless it was the target of a call from
      // a hot block. The alternative would be to set the count to one or
      // accumulate the number of calls from the callsite into the function
      // samples. Results from performance testing seem to favor the zero
      // count though, so I'm leaving it this way for now.
      return Cg.addNode(Function, Size, Function->getKnownExecutionCount());
    }
    return Id;
  };

  // Add call graph edges.
  uint64_t NotProcessed = 0;
  uint64_t TotalCallsites = 0;
  uint64_t NoProfileCallsites = 0;
  uint64_t NumFallbacks = 0;
  uint64_t RecursiveCallsites = 0;
  for (auto &It : BC.getBinaryFunctions()) {
    BinaryFunction *Function = &It.second;

    if (Filter(*Function))
      continue;

    const CallGraph::NodeId SrcId = lookupNode(Function);
    // Offset of the current basic block from the beginning of the function.
    uint64_t Offset = 0;

    auto recordCall = [&](const MCSymbol *DestSymbol, const uint64_t Count) {
      if (BinaryFunction *DstFunc =
              DestSymbol ? BC.getFunctionForSymbol(DestSymbol) : nullptr) {
        if (DstFunc == Function) {
          LLVM_DEBUG(dbgs() << "BOLT-INFO: recursive call detected in "
                            << *DstFunc << "\n");
          ++RecursiveCallsites;
          if (IgnoreRecursiveCalls)
            return false;
        }
        if (Filter(*DstFunc))
          return false;
        const CallGraph::NodeId DstId = lookupNode(DstFunc);
        const bool IsValidCount = Count != COUNT_NO_PROFILE;
        const uint64_t AdjCount = UseEdgeCounts && IsValidCount ? Count : 1;
        if (!IsValidCount)
          ++NoProfileCallsites;
        Cg.incArcWeight(SrcId, DstId, AdjCount, Offset);
        LLVM_DEBUG(if (opts::Verbosity > 1) {
          dbgs() << "BOLT-DEBUG: buildCallGraph: call " << *Function << " -> "
                 << *DstFunc << " @ " << Offset << "\n";
        });
        return true;
      }

      return false;
    };

    // Pairs of (symbol, count) for each target at this callsite.
    using TargetDesc = std::pair<const MCSymbol *, uint64_t>;
    using CallInfoTy = std::vector<TargetDesc>;

    // Get pairs of (symbol, count) for each target at this callsite.
    // If the call is to an unknown function, the symbol will be nullptr.
    // If there is no profiling data, the count will be COUNT_NO_PROFILE.
    auto getCallInfo = [&](const BinaryBasicBlock *BB, const MCInst &Inst) {
      CallInfoTy Counts;
      const MCSymbol *DstSym = BC.MIB->getTargetSymbol(Inst);

      // If this is an indirect call, use the perf data directly.
      if (!DstSym && BC.MIB->hasAnnotation(Inst, "CallProfile")) {
        const auto &ICSP = BC.MIB->getAnnotationAs<IndirectCallSiteProfile>(
            Inst, "CallProfile");
        for (const IndirectCallProfile &CSI : ICSP)
          if (CSI.Symbol)
            Counts.emplace_back(CSI.Symbol, CSI.Count);
      } else {
        const uint64_t Count = BB->getExecutionCount();
        Counts.emplace_back(DstSym, Count);
      }

      return Counts;
    };
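    // For example (symbols are illustrative, not from any real binary): a
    // direct call in a block executed 100 times yields {(Callee, 100)},
    // while an indirect call whose "CallProfile" annotation recorded targets
    // Foo and Bar with counts 60 and 40 yields {(Foo, 60), (Bar, 40)}.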

    // If the function has an invalid profile, try to use the perf data
    // directly (if requested). If there is no perf data for this function,
    // fall back to the CFG walker which attempts to handle missing data.
    if (!Function->hasValidProfile() && CgFromPerfData &&
        !Function->getAllCallSites().empty()) {
      LLVM_DEBUG(
          dbgs() << "BOLT-DEBUG: buildCallGraph: Falling back to perf data"
                 << " for " << *Function << "\n");
      ++NumFallbacks;
      const size_t Size = functionSize(Function);
      for (const IndirectCallProfile &CSI : Function->getAllCallSites()) {
        ++TotalCallsites;

        if (!CSI.Symbol)
          continue;

        // The computed offset may exceed the hot part of the function; hence,
        // bound it by the size.
        Offset = CSI.Offset;
        if (Offset > Size)
          Offset = Size;

        if (!recordCall(CSI.Symbol, CSI.Count))
          ++NotProcessed;
      }
    } else {
      for (BinaryBasicBlock *BB : Function->layout()) {
        // Don't count calls from cold blocks unless requested.
        if (BB->isCold() && !IncludeColdCalls)
          continue;

        // Determine whether the block is included in Function's (hot) size.
        // See BinaryFunction::estimateHotSize.
        bool BBIncludedInFunctionSize = false;
        if (UseFunctionHotSize && Function->isSplit()) {
          if (UseSplitHotSize)
            BBIncludedInFunctionSize = !BB->isCold();
          else
            BBIncludedInFunctionSize = BB->getKnownExecutionCount() != 0;
        } else {
          BBIncludedInFunctionSize = true;
        }

        for (MCInst &Inst : *BB) {
          // Find call instructions and extract target symbols from each one.
          if (BC.MIB->isCall(Inst)) {
            const CallInfoTy CallInfo = getCallInfo(BB, Inst);

            if (!CallInfo.empty()) {
              for (const TargetDesc &CI : CallInfo) {
                ++TotalCallsites;
                if (!recordCall(CI.first, CI.second))
                  ++NotProcessed;
              }
            } else {
              ++TotalCallsites;
              ++NotProcessed;
            }
          }
          // Increase Offset if the block counts towards the function size.
          if (BBIncludedInFunctionSize)
            Offset += BC.computeCodeSize(&Inst, &Inst + 1);
        }
      }
    }
  }

#ifndef NDEBUG
  bool PrintInfo = DebugFlag && isCurrentDebugType("callgraph");
#else
  bool PrintInfo = false;
#endif
  if (PrintInfo || opts::Verbosity > 0) {
    outs() << format("BOLT-INFO: buildCallGraph: %zu nodes, %" PRIu64
                     " callsites (%" PRIu64 " recursive), density = %.6lf, "
                     "%" PRIu64 " callsites not processed, %" PRIu64
                     " callsites with invalid profile, used perf data for "
                     "%" PRIu64 " stale functions.\n",
                     Cg.numNodes(), TotalCallsites, RecursiveCallsites,
                     Cg.density(), NotProcessed, NoProfileCallsites,
                     NumFallbacks);
  }

  return Cg;
}

} // namespace bolt
} // namespace llvm
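
// Illustrative use only (a sketch, not an actual call site; passes such as
// ReorderFunctions drive buildCallGraph with options along these lines):
//
//   BinaryFunctionCallGraph CG = buildCallGraph(
//       BC, [](const BinaryFunction &BF) { return !BF.hasValidProfile(); },
//       /*CgFromPerfData=*/true, /*IncludeColdCalls=*/false,
//       /*UseFunctionHotSize=*/true, /*UseSplitHotSize=*/false,
//       /*UseEdgeCounts=*/true, /*IgnoreRecursiveCalls=*/false);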