//===- LazyCallGraph.cpp - Analysis of a Module's call graph -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "lcg"

void LazyCallGraph::EdgeSequence::insertEdgeInternal(Node &TargetN,
                                                     Edge::Kind EK) {
  EdgeIndexMap.insert({&TargetN, Edges.size()});
  Edges.emplace_back(TargetN, EK);
}

void LazyCallGraph::EdgeSequence::setEdgeKind(Node &TargetN, Edge::Kind EK) {
  Edges[EdgeIndexMap.find(&TargetN)->second].setKind(EK);
}

bool LazyCallGraph::EdgeSequence::removeEdgeInternal(Node &TargetN) {
  auto IndexMapI = EdgeIndexMap.find(&TargetN);
  if (IndexMapI == EdgeIndexMap.end())
    return false;

  Edges[IndexMapI->second] = Edge();
  EdgeIndexMap.erase(IndexMapI);
  return true;
}

static void addEdge(SmallVectorImpl<LazyCallGraph::Edge> &Edges,
                    DenseMap<LazyCallGraph::Node *, int> &EdgeIndexMap,
                    LazyCallGraph::Node &N, LazyCallGraph::Edge::Kind EK) {
  if (!EdgeIndexMap.insert({&N, Edges.size()}).second)
    return;

  LLVM_DEBUG(dbgs() << "  Added callable function: " << N.getName() << "\n");
  Edges.emplace_back(LazyCallGraph::Edge(N, EK));
}

LazyCallGraph::EdgeSequence &LazyCallGraph::Node::populateSlow() {
  assert(!Edges && "Must not have already populated the edges for this node!");

  LLVM_DEBUG(dbgs() << "  Adding functions called by '" << getName()
                    << "' to the graph.\n");

  Edges = EdgeSequence();

  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Function *, 4> Callees;
  SmallPtrSet<Constant *, 16> Visited;

  // Find all the potential call graph edges in this function. We track both
  // actual call edges and indirect references to functions. The direct calls
  // are trivially added, but to accumulate the latter we walk the instructions
  // and add every operand which is a constant to the worklist to process
  // afterward.
  //
  // Note that we consider *any* function with a definition to be a viable
  // edge. Even if the function's definition is subject to replacement by
  // some other module (say, a weak definition) there may still be
  // optimizations which essentially speculate based on the definition and
  // a way to check that the specific definition is in fact the one being
  // used.
  // For example, this could be done by moving the weak definition to
  // a strong (internal) definition and making the weak definition be an
  // alias. Then a test of the address of the weak function against the new
  // strong definition's address would be an effective way to determine the
  // safety of optimizing a direct call edge.
  for (BasicBlock &BB : *F)
    for (Instruction &I : BB) {
      if (auto CS = CallSite(&I))
        if (Function *Callee = CS.getCalledFunction())
          if (!Callee->isDeclaration())
            if (Callees.insert(Callee).second) {
              Visited.insert(Callee);
              addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*Callee),
                      LazyCallGraph::Edge::Call);
            }

      for (Value *Op : I.operand_values())
        if (Constant *C = dyn_cast<Constant>(Op))
          if (Visited.insert(C).second)
            Worklist.push_back(C);
    }

  // We've collected all the constant (and thus potentially function or
  // function containing) operands to all of the instructions in the function.
  // Process them (recursively) collecting every function found.
  visitReferences(Worklist, Visited, [&](Function &F) {
    addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(F),
            LazyCallGraph::Edge::Ref);
  });

  // Add implicit reference edges to any defined libcall functions (if we
  // haven't found an explicit edge).
  for (auto *F : G->LibFunctions)
    if (!Visited.count(F))
      addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*F),
              LazyCallGraph::Edge::Ref);

  return *Edges;
}

void LazyCallGraph::Node::replaceFunction(Function &NewF) {
  assert(F != &NewF && "Must not replace a function with itself!");
  F = &NewF;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LazyCallGraph::Node::dump() const {
  dbgs() << *this << '\n';
}
#endif

static bool isKnownLibFunction(Function &F, TargetLibraryInfo &TLI) {
  LibFunc LF;

  // Either this is a normal library function or a "vectorizable" function.
  return TLI.getLibFunc(F, LF) || TLI.isFunctionVectorizable(F.getName());
}

LazyCallGraph::LazyCallGraph(Module &M, TargetLibraryInfo &TLI) {
  LLVM_DEBUG(dbgs() << "Building CG for module: " << M.getModuleIdentifier()
                    << "\n");
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    // If this function is a known lib function to LLVM then we want to
    // synthesize reference edges to it to model the fact that LLVM can turn
    // arbitrary code into a library function call.
    if (isKnownLibFunction(F, TLI))
      LibFunctions.insert(&F);

    if (F.hasLocalLinkage())
      continue;

    // External linkage defined functions have edges to them from other
    // modules.
    LLVM_DEBUG(dbgs() << "  Adding '" << F.getName()
                      << "' to entry set of the graph.\n");
    addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F), Edge::Ref);
  }

  // Now add entry nodes for functions reachable via initializers to globals.
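  // Global initializers can reference functions through arbitrarily nested
  // constant expressions, so we seed a worklist with each initializer and let
  // visitReferences walk the nested constants to find every referenced
  // function.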
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  for (GlobalVariable &GV : M.globals())
    if (GV.hasInitializer())
      if (Visited.insert(GV.getInitializer()).second)
        Worklist.push_back(GV.getInitializer());

  LLVM_DEBUG(
      dbgs() << "  Adding functions referenced by global initializers to the "
                "entry set.\n");
  visitReferences(Worklist, Visited, [&](Function &F) {
    addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F),
            LazyCallGraph::Edge::Ref);
  });
}

LazyCallGraph::LazyCallGraph(LazyCallGraph &&G)
    : BPA(std::move(G.BPA)), NodeMap(std::move(G.NodeMap)),
      EntryEdges(std::move(G.EntryEdges)), SCCBPA(std::move(G.SCCBPA)),
      SCCMap(std::move(G.SCCMap)),
      LibFunctions(std::move(G.LibFunctions)) {
  updateGraphPtrs();
}

LazyCallGraph &LazyCallGraph::operator=(LazyCallGraph &&G) {
  BPA = std::move(G.BPA);
  NodeMap = std::move(G.NodeMap);
  EntryEdges = std::move(G.EntryEdges);
  SCCBPA = std::move(G.SCCBPA);
  SCCMap = std::move(G.SCCMap);
  LibFunctions = std::move(G.LibFunctions);
  updateGraphPtrs();
  return *this;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LazyCallGraph::SCC::dump() const {
  dbgs() << *this << '\n';
}
#endif

#ifndef NDEBUG
void LazyCallGraph::SCC::verify() {
  assert(OuterRefSCC && "Can't have a null RefSCC!");
  assert(!Nodes.empty() && "Can't have an empty SCC!");

  for (Node *N : Nodes) {
    assert(N && "Can't have a null node!");
    assert(OuterRefSCC->G->lookupSCC(*N) == this &&
           "Node does not map to this SCC!");
    assert(N->DFSNumber == -1 &&
           "Must set DFS numbers to -1 when adding a node to an SCC!");
    assert(N->LowLink == -1 &&
           "Must set low link to -1 when adding a node to an SCC!");
    for (Edge &E : **N)
      assert(E.getNode().isPopulated() && "Can't have an unpopulated node!");
  }
}
#endif

bool LazyCallGraph::SCC::isParentOf(const SCC &C) const {
  if (this == &C)
    return false;

  for (Node &N : *this)
    for (Edge &E : N->calls())
      if (OuterRefSCC->G->lookupSCC(E.getNode()) == &C)
        return true;

  // No edges found.
  return false;
}

bool LazyCallGraph::SCC::isAncestorOf(const SCC &TargetC) const {
  if (this == &TargetC)
    return false;

  LazyCallGraph &G = *OuterRefSCC->G;

  // Start with this SCC.
  SmallPtrSet<const SCC *, 16> Visited = {this};
  SmallVector<const SCC *, 16> Worklist = {this};

  // Walk down the graph until we run out of edges or find a path to TargetC.
  do {
    const SCC &C = *Worklist.pop_back_val();
    for (Node &N : C)
      for (Edge &E : N->calls()) {
        SCC *CalleeC = G.lookupSCC(E.getNode());
        if (!CalleeC)
          continue;

        // If the callee's SCC is the TargetC, we're done.
        if (CalleeC == &TargetC)
          return true;

        // If this is the first time we've reached this SCC, put it on the
        // worklist to recurse through.
        if (Visited.insert(CalleeC).second)
          Worklist.push_back(CalleeC);
      }
  } while (!Worklist.empty());

  // No paths found.
  return false;
}

LazyCallGraph::RefSCC::RefSCC(LazyCallGraph &G) : G(&G) {}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LazyCallGraph::RefSCC::dump() const {
  dbgs() << *this << '\n';
}
#endif

#ifndef NDEBUG
void LazyCallGraph::RefSCC::verify() {
  assert(G && "Can't have a null graph!");
  assert(!SCCs.empty() && "Can't have an empty SCC!");

  // Verify basic properties of the SCCs.
  SmallPtrSet<SCC *, 4> SCCSet;
  for (SCC *C : SCCs) {
    assert(C && "Can't have a null SCC!");
    C->verify();
    assert(&C->getOuterRefSCC() == this &&
           "SCC doesn't think it is inside this RefSCC!");
    bool Inserted = SCCSet.insert(C).second;
    assert(Inserted && "Found a duplicate SCC!");
    auto IndexIt = SCCIndices.find(C);
    assert(IndexIt != SCCIndices.end() &&
           "Found an SCC that doesn't have an index!");
  }

  // Check that our indices map correctly.
  for (auto &SCCIndexPair : SCCIndices) {
    SCC *C = SCCIndexPair.first;
    int i = SCCIndexPair.second;
    assert(C && "Can't have a null SCC in the indices!");
    assert(SCCSet.count(C) && "Found an index for an SCC not in the RefSCC!");
    assert(SCCs[i] == C && "Index doesn't point to SCC!");
  }

  // Check that the SCCs are in fact in post-order.
  for (int i = 0, Size = SCCs.size(); i < Size; ++i) {
    SCC &SourceSCC = *SCCs[i];
    for (Node &N : SourceSCC)
      for (Edge &E : *N) {
        if (!E.isCall())
          continue;
        SCC &TargetSCC = *G->lookupSCC(E.getNode());
        if (&TargetSCC.getOuterRefSCC() == this) {
          assert(SCCIndices.find(&TargetSCC)->second <= i &&
                 "Edge between SCCs violates post-order relationship.");
          continue;
        }
      }
  }
}
#endif

bool LazyCallGraph::RefSCC::isParentOf(const RefSCC &RC) const {
  if (&RC == this)
    return false;

  // Search all edges to see if this is a parent.
  for (SCC &C : *this)
    for (Node &N : C)
      for (Edge &E : *N)
        if (G->lookupRefSCC(E.getNode()) == &RC)
          return true;

  return false;
}

bool LazyCallGraph::RefSCC::isAncestorOf(const RefSCC &RC) const {
  if (&RC == this)
    return false;

  // For each descendant of this RefSCC, see if one of its children is the
  // argument. If not, add that descendant to the worklist and continue
  // searching.
  SmallVector<const RefSCC *, 4> Worklist;
  SmallPtrSet<const RefSCC *, 4> Visited;
  Worklist.push_back(this);
  Visited.insert(this);
  do {
    const RefSCC &DescendantRC = *Worklist.pop_back_val();
    for (SCC &C : DescendantRC)
      for (Node &N : C)
        for (Edge &E : *N) {
          auto *ChildRC = G->lookupRefSCC(E.getNode());
          if (ChildRC == &RC)
            return true;
          if (!ChildRC || !Visited.insert(ChildRC).second)
            continue;
          Worklist.push_back(ChildRC);
        }
  } while (!Worklist.empty());

  return false;
}

/// Generic helper that updates a postorder sequence of SCCs for a potentially
/// cycle-introducing edge insertion.
///
/// A postorder sequence of SCCs of a directed graph has one fundamental
/// property: all edges in the DAG of SCCs point "up" the sequence. That is,
/// all edges in the SCC DAG point to prior SCCs in the sequence.
///
/// This routine both updates a postorder sequence and uses that sequence to
/// compute the set of SCCs connected into a cycle.
/// It should only be called to insert a "downward" edge which will require
/// changing the sequence to restore it to a postorder.
///
/// When inserting an edge from an earlier SCC to a later SCC in some postorder
/// sequence, all of the SCCs which may be impacted are in the closed range of
/// those two within the postorder sequence. The algorithm used here to restore
/// the state is as follows:
///
/// 1) Starting from the source SCC, construct a set of SCCs which reach the
///    source SCC consisting of just the source SCC. Then scan toward the
///    target SCC in postorder and for each SCC, if it has an edge to an SCC
///    in the set, add it to the set. Otherwise, the source SCC is not
///    a successor, move it in the postorder sequence to immediately before
///    the source SCC, shifting the source SCC and all SCCs in the set one
///    position toward the target SCC. Stop scanning after processing the
///    target SCC.
/// 2) If the source SCC is now past the target SCC in the postorder sequence,
///    and thus the new edge will flow toward the start, we are done.
/// 3) Otherwise, starting from the target SCC, walk all edges which reach an
///    SCC between the source and the target, and add them to the set of
///    connected SCCs, then recurse through them. Once a complete set of the
///    SCCs the target connects to is known, hoist the remaining SCCs between
///    the source and the target to be above the target. Note that there is no
///    need to process the source SCC, it is already known to connect.
/// 4) At this point, all of the SCCs in the closed range between the source
///    SCC and the target SCC in the postorder sequence are connected,
///    including the target SCC and the source SCC. Inserting the edge from
///    the source SCC to the target SCC will form a cycle out of precisely
///    these SCCs. Thus we can merge all of the SCCs in this closed range into
///    a single SCC.
///
/// This process has various important properties:
/// - Only mutates the SCCs when adding the edge actually changes the SCC
///   structure.
/// - Never mutates SCCs which are unaffected by the change.
/// - Updates the postorder sequence to correctly satisfy the postorder
///   constraint after the edge is inserted.
/// - Only reorders SCCs in the closed postorder sequence from the source to
///   the target, so easy to bound how much has changed even in the ordering.
/// - Big-O is the number of edges in the closed postorder range of SCCs from
///   source to target.
///
/// This helper routine, in addition to updating the postorder sequence itself
/// will also update a map from SCCs to indices within that sequence.
///
/// The sequence and the map must operate on pointers to the SCC type.
///
/// Two callbacks must be provided. The first computes the subset of SCCs in
/// the postorder closed range from the source to the target which connect to
/// the source SCC via some (transitive) set of edges. The second computes the
/// subset of the same range which the target SCC connects to via some
/// (transitive) set of edges. Both callbacks should populate the set argument
/// provided.
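///
/// As an illustrative sketch: given the postorder sequence [S, X, T] and a new
/// edge from S to T, if X does not (transitively) reach S it is shifted to
/// before S; if T also does not reach S the sequence becomes [X, T, S], the
/// postorder is restored, and there is nothing to merge. If T does reach S,
/// then S, T, and every SCC still remaining between them after the reordering
/// form a cycle and are merged into a single SCC.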
template <typename SCCT, typename PostorderSequenceT, typename SCCIndexMapT,
          typename ComputeSourceConnectedSetCallableT,
          typename ComputeTargetConnectedSetCallableT>
static iterator_range<typename PostorderSequenceT::iterator>
updatePostorderSequenceForEdgeInsertion(
    SCCT &SourceSCC, SCCT &TargetSCC, PostorderSequenceT &SCCs,
    SCCIndexMapT &SCCIndices,
    ComputeSourceConnectedSetCallableT ComputeSourceConnectedSet,
    ComputeTargetConnectedSetCallableT ComputeTargetConnectedSet) {
  int SourceIdx = SCCIndices[&SourceSCC];
  int TargetIdx = SCCIndices[&TargetSCC];
  assert(SourceIdx < TargetIdx && "Cannot have equal indices here!");

  SmallPtrSet<SCCT *, 4> ConnectedSet;

  // Compute the SCCs which (transitively) reach the source.
  ComputeSourceConnectedSet(ConnectedSet);

  // Partition the SCCs in this part of the post-order sequence so only SCCs
  // connecting to the source remain between it and the target. This is
  // a benign partition as it preserves postorder.
  auto SourceI = std::stable_partition(
      SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx + 1,
      [&ConnectedSet](SCCT *C) { return !ConnectedSet.count(C); });
  for (int i = SourceIdx, e = TargetIdx + 1; i < e; ++i)
    SCCIndices.find(SCCs[i])->second = i;

  // If the target doesn't connect to the source, then we've corrected the
  // post-order and there are no cycles formed.
  if (!ConnectedSet.count(&TargetSCC)) {
    assert(SourceI > (SCCs.begin() + SourceIdx) &&
           "Must have moved the source to fix the post-order.");
    assert(*std::prev(SourceI) == &TargetSCC &&
           "Last SCC to move should have been the target.");

    // Return an empty range at the target SCC indicating there is nothing to
    // merge.
    return make_range(std::prev(SourceI), std::prev(SourceI));
  }

  assert(SCCs[TargetIdx] == &TargetSCC &&
         "Should not have moved target if connected!");
  SourceIdx = SourceI - SCCs.begin();
  assert(SCCs[SourceIdx] == &SourceSCC &&
         "Bad updated index computation for the source SCC!");

  // See whether there are any remaining intervening SCCs between the source
  // and target. If so we need to make sure they all are reachable from the
  // target.
  if (SourceIdx + 1 < TargetIdx) {
    ConnectedSet.clear();
    ComputeTargetConnectedSet(ConnectedSet);

    // Partition SCCs so that only SCCs reached from the target remain between
    // the source and the target. This preserves postorder.
    auto TargetI = std::stable_partition(
        SCCs.begin() + SourceIdx + 1, SCCs.begin() + TargetIdx + 1,
        [&ConnectedSet](SCCT *C) { return ConnectedSet.count(C); });
    for (int i = SourceIdx + 1, e = TargetIdx + 1; i < e; ++i)
      SCCIndices.find(SCCs[i])->second = i;
    TargetIdx = std::prev(TargetI) - SCCs.begin();
    assert(SCCs[TargetIdx] == &TargetSCC &&
           "Should always end with the target!");
  }

  // At this point, we know that connecting source to target forms a cycle
  // because target connects back to source, and we know that all of the SCCs
  // between the source and target in the postorder sequence participate in
  // that cycle.
  return make_range(SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx);
}

bool
LazyCallGraph::RefSCC::switchInternalEdgeToCall(
    Node &SourceN, Node &TargetN,
    function_ref<void(ArrayRef<SCC *> MergeSCCs)> MergeCB) {
  assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
  SmallVector<SCC *, 1> DeletedSCCs;

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and when this
  // routine finishes.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
#endif

  SCC &SourceSCC = *G->lookupSCC(SourceN);
  SCC &TargetSCC = *G->lookupSCC(TargetN);

  // If the two nodes are already part of the same SCC, we're also done as
  // we've just added more connectivity.
  if (&SourceSCC == &TargetSCC) {
    SourceN->setEdgeKind(TargetN, Edge::Call);
    return false; // No new cycle.
  }

  // At this point we leverage the postorder list of SCCs to detect when the
  // insertion of an edge changes the SCC structure in any way.
  //
  // First and foremost, we can eliminate the need for any changes when the
  // edge is toward the beginning of the postorder sequence because all edges
  // flow in that direction already. Thus adding a new one cannot form a cycle.
  int SourceIdx = SCCIndices[&SourceSCC];
  int TargetIdx = SCCIndices[&TargetSCC];
  if (TargetIdx < SourceIdx) {
    SourceN->setEdgeKind(TargetN, Edge::Call);
    return false; // No new cycle.
  }

  // Compute the SCCs which (transitively) reach the source.
  auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
#ifndef NDEBUG
    // Check that the RefSCC is still valid before computing this as the
    // results will be nonsensical if we've broken its invariants.
    verify();
#endif
    ConnectedSet.insert(&SourceSCC);
    auto IsConnected = [&](SCC &C) {
      for (Node &N : C)
        for (Edge &E : N->calls())
          if (ConnectedSet.count(G->lookupSCC(E.getNode())))
            return true;

      return false;
    };

    for (SCC *C : make_range(SCCs.begin() + SourceIdx + 1,
                             SCCs.begin() + TargetIdx + 1))
      if (IsConnected(*C))
        ConnectedSet.insert(C);
  };

  // Use a normal worklist to find which SCCs the target connects to. We still
  // bound the search based on the range in the postorder list we care about,
  // but because this is forward connectivity we just "recurse" through the
  // edges.
  auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
#ifndef NDEBUG
    // Check that the RefSCC is still valid before computing this as the
    // results will be nonsensical if we've broken its invariants.
    verify();
#endif
    ConnectedSet.insert(&TargetSCC);
    SmallVector<SCC *, 4> Worklist;
    Worklist.push_back(&TargetSCC);
    do {
      SCC &C = *Worklist.pop_back_val();
      for (Node &N : C)
        for (Edge &E : *N) {
          if (!E.isCall())
            continue;
          SCC &EdgeC = *G->lookupSCC(E.getNode());
          if (&EdgeC.getOuterRefSCC() != this)
            // Not in this RefSCC...
            continue;
          if (SCCIndices.find(&EdgeC)->second <= SourceIdx)
            // Not in the postorder sequence between source and target.
            continue;

          if (ConnectedSet.insert(&EdgeC).second)
            Worklist.push_back(&EdgeC);
        }
    } while (!Worklist.empty());
  };

  // Use a generic helper to update the postorder sequence of SCCs and return
  // a range of any SCCs connected into a cycle by inserting this edge.
  // This routine will also take care of updating the indices into the
  // postorder sequence.
  auto MergeRange = updatePostorderSequenceForEdgeInsertion(
      SourceSCC, TargetSCC, SCCs, SCCIndices, ComputeSourceConnectedSet,
      ComputeTargetConnectedSet);

  // Run the user's callback on the merged SCCs before we actually merge them.
  if (MergeCB)
    MergeCB(makeArrayRef(MergeRange.begin(), MergeRange.end()));

  // If the merge range is empty, then adding the edge didn't actually form any
  // new cycles. We're done.
  if (empty(MergeRange)) {
    // Now that the SCC structure is finalized, flip the kind to call.
    SourceN->setEdgeKind(TargetN, Edge::Call);
    return false; // No new cycle.
  }

#ifndef NDEBUG
  // Before merging, check that the RefSCC remains valid after all the
  // postorder updates.
  verify();
#endif

  // Otherwise we need to merge all of the SCCs in the cycle into a single
  // result SCC.
  //
  // NB: We merge into the target because all of these functions were already
  // reachable from the target, meaning any SCC-wide properties deduced about
  // it other than the set of functions within it will not have changed.
  for (SCC *C : MergeRange) {
    assert(C != &TargetSCC &&
           "We merge *into* the target and shouldn't process it here!");
    SCCIndices.erase(C);
    TargetSCC.Nodes.append(C->Nodes.begin(), C->Nodes.end());
    for (Node *N : C->Nodes)
      G->SCCMap[N] = &TargetSCC;
    C->clear();
    DeletedSCCs.push_back(C);
  }

  // Erase the merged SCCs from the list and update the indices of the
  // remaining SCCs.
  int IndexOffset = MergeRange.end() - MergeRange.begin();
  auto EraseEnd = SCCs.erase(MergeRange.begin(), MergeRange.end());
  for (SCC *C : make_range(EraseEnd, SCCs.end()))
    SCCIndices[C] -= IndexOffset;

  // Now that the SCC structure is finalized, flip the kind to call.
  SourceN->setEdgeKind(TargetN, Edge::Call);

  // And we're done, but we did form a new cycle.
  return true;
}

void LazyCallGraph::RefSCC::switchTrivialInternalEdgeToRef(Node &SourceN,
                                                           Node &TargetN) {
  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and when this
  // routine finishes.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
#endif

  assert(G->lookupRefSCC(SourceN) == this &&
         "Source must be in this RefSCC.");
  assert(G->lookupRefSCC(TargetN) == this &&
         "Target must be in this RefSCC.");
  assert(G->lookupSCC(SourceN) != G->lookupSCC(TargetN) &&
         "Source and Target must be in separate SCCs for this to be trivial!");

  // Set the edge kind.
  SourceN->setEdgeKind(TargetN, Edge::Ref);
}

iterator_range<LazyCallGraph::RefSCC::iterator>
LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) {
  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and when this
  // routine finishes.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
#endif

  assert(G->lookupRefSCC(SourceN) == this &&
         "Source must be in this RefSCC.");
  assert(G->lookupRefSCC(TargetN) == this &&
         "Target must be in this RefSCC.");

  SCC &TargetSCC = *G->lookupSCC(TargetN);
  assert(G->lookupSCC(SourceN) == &TargetSCC && "Source and Target must be in "
                                                "the same SCC to require the "
                                                "full CG update.");

  // Set the edge kind.
  SourceN->setEdgeKind(TargetN, Edge::Ref);

  // Otherwise we are removing a call edge from a single SCC. This may break
  // the cycle. In order to compute the new set of SCCs, we need to do a small
  // DFS over the nodes within the SCC to form any sub-cycles that remain as
  // distinct SCCs and compute a postorder over the resulting SCCs.
  //
  // However, we specially handle the target node. The target node is known to
  // reach all other nodes in the original SCC by definition. This means that
  // we want the old SCC to be replaced with an SCC containing that node as it
  // will be the root of whatever SCC DAG results from the DFS. Assumptions
  // about an SCC such as the set of functions called will continue to hold,
  // etc.

  SCC &OldSCC = TargetSCC;
  SmallVector<std::pair<Node *, EdgeSequence::call_iterator>, 16> DFSStack;
  SmallVector<Node *, 16> PendingSCCStack;
  SmallVector<SCC *, 4> NewSCCs;

  // Prepare the nodes for a fresh DFS.
  SmallVector<Node *, 16> Worklist;
  Worklist.swap(OldSCC.Nodes);
  for (Node *N : Worklist) {
    N->DFSNumber = N->LowLink = 0;
    G->SCCMap.erase(N);
  }

  // Force the target node to be in the old SCC. This also enables us to take
  // a very significant short-cut in the standard Tarjan walk to re-form SCCs
  // below: whenever we build an edge that reaches the target node, we know
  // that the target node eventually connects back to all other nodes in our
  // walk. As a consequence, we can detect and handle participants in that
  // cycle without walking all the edges that form this connection, and instead
  // by relying on the fundamental guarantee coming into this operation (all
  // nodes are reachable from the target due to previously forming an SCC).
  TargetN.DFSNumber = TargetN.LowLink = -1;
  OldSCC.Nodes.push_back(&TargetN);
  G->SCCMap[&TargetN] = &OldSCC;

  // Scan down the stack and DFS across the call edges.
  for (Node *RootN : Worklist) {
    assert(DFSStack.empty() &&
           "Cannot begin a new root with a non-empty DFS stack!");
    assert(PendingSCCStack.empty() &&
           "Cannot begin a new root with pending nodes for an SCC!");

    // Skip any nodes we've already reached in the DFS.
    if (RootN->DFSNumber != 0) {
      assert(RootN->DFSNumber == -1 &&
             "Shouldn't have any mid-DFS root nodes!");
      continue;
    }

    RootN->DFSNumber = RootN->LowLink = 1;
    int NextDFSNumber = 2;

    DFSStack.push_back({RootN, (*RootN)->call_begin()});
    do {
      Node *N;
      EdgeSequence::call_iterator I;
      std::tie(N, I) = DFSStack.pop_back_val();
      auto E = (*N)->call_end();
      while (I != E) {
        Node &ChildN = I->getNode();
        if (ChildN.DFSNumber == 0) {
          // We haven't yet visited this child, so descend, pushing the current
          // node onto the stack.
          DFSStack.push_back({N, I});

          assert(!G->SCCMap.count(&ChildN) &&
                 "Found a node with 0 DFS number but already in an SCC!");
          ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
          N = &ChildN;
          I = (*N)->call_begin();
          E = (*N)->call_end();
          continue;
        }

        // Check for the child already being part of some component.
        if (ChildN.DFSNumber == -1) {
          if (G->lookupSCC(ChildN) == &OldSCC) {
            // If the child is part of the old SCC, we know that it can reach
            // every other node, so we have formed a cycle. Pull the entire DFS
            // and pending stacks into it. See the comment above about setting
            // up the old SCC for why we do this.
            int OldSize = OldSCC.size();
            OldSCC.Nodes.push_back(N);
            OldSCC.Nodes.append(PendingSCCStack.begin(), PendingSCCStack.end());
            PendingSCCStack.clear();
            while (!DFSStack.empty())
              OldSCC.Nodes.push_back(DFSStack.pop_back_val().first);
            for (Node &N : make_range(OldSCC.begin() + OldSize, OldSCC.end())) {
              N.DFSNumber = N.LowLink = -1;
              G->SCCMap[&N] = &OldSCC;
            }
            N = nullptr;
            break;
          }

          // If the child has already been added to some child component, it
          // couldn't impact the low-link of this parent because it isn't
          // connected, and thus its low-link isn't relevant so skip it.
          ++I;
          continue;
        }

        // Track the lowest linked child as the lowest link for this node.
        assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
        if (ChildN.LowLink < N->LowLink)
          N->LowLink = ChildN.LowLink;

        // Move to the next edge.
        ++I;
      }
      if (!N)
        // Cleared the DFS early, start another round.
        break;

      // We've finished processing N and its descendants, put it on our pending
      // SCC stack to eventually get merged into an SCC of nodes.
      PendingSCCStack.push_back(N);

      // If this node is linked to some lower entry, continue walking up the
      // stack.
      if (N->LowLink != N->DFSNumber)
        continue;

      // Otherwise, we've completed an SCC. Append it to our post order list of
      // SCCs.
      int RootDFSNumber = N->DFSNumber;
      // Find the range of the node stack by walking down until we pass the
      // root DFS number.
      auto SCCNodes = make_range(
          PendingSCCStack.rbegin(),
          find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
            return N->DFSNumber < RootDFSNumber;
          }));

      // Form a new SCC out of these nodes and then clear them off our pending
      // stack.
      NewSCCs.push_back(G->createSCC(*this, SCCNodes));
      for (Node &N : *NewSCCs.back()) {
        N.DFSNumber = N.LowLink = -1;
        G->SCCMap[&N] = NewSCCs.back();
      }
      PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
    } while (!DFSStack.empty());
  }

  // Insert the remaining SCCs before the old one. The old SCC can reach all
  // other SCCs we form because it contains the target node of the removed edge
  // of the old SCC. This means that we will have edges into all of the new
  // SCCs, which means the old one must come last for postorder.
  int OldIdx = SCCIndices[&OldSCC];
  SCCs.insert(SCCs.begin() + OldIdx, NewSCCs.begin(), NewSCCs.end());

  // Update the mapping from SCC* to index to account for the new SCCs and the
  // shifted positions of the old SCC and everything after it.
  for (int Idx = OldIdx, Size = SCCs.size(); Idx < Size; ++Idx)
    SCCIndices[SCCs[Idx]] = Idx;

  return make_range(SCCs.begin() + OldIdx,
                    SCCs.begin() + OldIdx + NewSCCs.size());
}

void LazyCallGraph::RefSCC::switchOutgoingEdgeToCall(Node &SourceN,
                                                     Node &TargetN) {
  assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");

  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
  assert(G->lookupRefSCC(TargetN) != this &&
         "Target must not be in this RefSCC.");
#ifdef EXPENSIVE_CHECKS
  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
         "Target must be a descendant of the Source.");
#endif

  // Edges between RefSCCs are the same regardless of call or ref, so we can
  // just flip the edge here.
  SourceN->setEdgeKind(TargetN, Edge::Call);

#ifndef NDEBUG
  // Check that the RefSCC is still valid.
  verify();
#endif
}

void LazyCallGraph::RefSCC::switchOutgoingEdgeToRef(Node &SourceN,
                                                    Node &TargetN) {
  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");

  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
  assert(G->lookupRefSCC(TargetN) != this &&
         "Target must not be in this RefSCC.");
#ifdef EXPENSIVE_CHECKS
  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
         "Target must be a descendant of the Source.");
#endif

  // Edges between RefSCCs are the same regardless of call or ref, so we can
  // just flip the edge here.
  SourceN->setEdgeKind(TargetN, Edge::Ref);

#ifndef NDEBUG
  // Check that the RefSCC is still valid.
  verify();
#endif
}

void LazyCallGraph::RefSCC::insertInternalRefEdge(Node &SourceN,
                                                  Node &TargetN) {
  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
  assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");

  SourceN->insertEdgeInternal(TargetN, Edge::Ref);

#ifndef NDEBUG
  // Check that the RefSCC is still valid.
  verify();
#endif
}

void LazyCallGraph::RefSCC::insertOutgoingEdge(Node &SourceN, Node &TargetN,
                                               Edge::Kind EK) {
  // First insert it into the caller.
  SourceN->insertEdgeInternal(TargetN, EK);

  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");

  assert(G->lookupRefSCC(TargetN) != this &&
         "Target must not be in this RefSCC.");
#ifdef EXPENSIVE_CHECKS
  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
         "Target must be a descendant of the Source.");
#endif

#ifndef NDEBUG
  // Check that the RefSCC is still valid.
  verify();
#endif
}

SmallVector<LazyCallGraph::RefSCC *, 1>
LazyCallGraph::RefSCC::insertIncomingRefEdge(Node &SourceN, Node &TargetN) {
  assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
  RefSCC &SourceC = *G->lookupRefSCC(SourceN);
  assert(&SourceC != this && "Source must not be in this RefSCC.");
#ifdef EXPENSIVE_CHECKS
  assert(SourceC.isDescendantOf(*this) &&
         "Source must be a descendant of the Target.");
#endif

  SmallVector<RefSCC *, 1> DeletedRefSCCs;

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and when this
  // routine finishes.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
#endif

  int SourceIdx = G->RefSCCIndices[&SourceC];
  int TargetIdx = G->RefSCCIndices[this];
  assert(SourceIdx < TargetIdx &&
         "Postorder list doesn't see edge as incoming!");

  // Compute the RefSCCs which (transitively) reach the source. We do this by
  // working backwards from the source using the parent set in each RefSCC,
  // skipping any RefSCCs that don't fall in the postorder range. This has the
  // advantage of walking the sparser parent edge (in high fan-out graphs) but
  // more importantly this avoids examining all forward edges in all RefSCCs
  // within the postorder range which aren't in fact connected. Only connected
  // RefSCCs (and their edges) are visited here.
  auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
    Set.insert(&SourceC);
    auto IsConnected = [&](RefSCC &RC) {
      for (SCC &C : RC)
        for (Node &N : C)
          for (Edge &E : *N)
            if (Set.count(G->lookupRefSCC(E.getNode())))
              return true;

      return false;
    };

    for (RefSCC *C : make_range(G->PostOrderRefSCCs.begin() + SourceIdx + 1,
                                G->PostOrderRefSCCs.begin() + TargetIdx + 1))
      if (IsConnected(*C))
        Set.insert(C);
  };

  // Use a normal worklist to find which SCCs the target connects to. We still
  // bound the search based on the range in the postorder list we care about,
  // but because this is forward connectivity we just "recurse" through the
  // edges.
  auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
    Set.insert(this);
    SmallVector<RefSCC *, 4> Worklist;
    Worklist.push_back(this);
    do {
      RefSCC &RC = *Worklist.pop_back_val();
      for (SCC &C : RC)
        for (Node &N : C)
          for (Edge &E : *N) {
            RefSCC &EdgeRC = *G->lookupRefSCC(E.getNode());
            if (G->getRefSCCIndex(EdgeRC) <= SourceIdx)
              // Not in the postorder sequence between source and target.
              continue;

            if (Set.insert(&EdgeRC).second)
              Worklist.push_back(&EdgeRC);
          }
    } while (!Worklist.empty());
  };

  // Use a generic helper to update the postorder sequence of RefSCCs and
  // return a range of any RefSCCs connected into a cycle by inserting this
  // edge. This routine will also take care of updating the indices into the
  // postorder sequence.
  iterator_range<SmallVectorImpl<RefSCC *>::iterator> MergeRange =
      updatePostorderSequenceForEdgeInsertion(
          SourceC, *this, G->PostOrderRefSCCs, G->RefSCCIndices,
          ComputeSourceConnectedSet, ComputeTargetConnectedSet);

  // Build a set so we can do fast tests for whether a RefSCC will end up as
  // part of the merged RefSCC.
  SmallPtrSet<RefSCC *, 16> MergeSet(MergeRange.begin(), MergeRange.end());

  // This RefSCC will always be part of that set, so just insert it here.
  MergeSet.insert(this);

  // Now that we have identified all of the SCCs which need to be merged into
  // a connected set with the inserted edge, merge all of them into this SCC.
  SmallVector<SCC *, 16> MergedSCCs;
  int SCCIndex = 0;
  for (RefSCC *RC : MergeRange) {
    assert(RC != this && "We're merging into the target RefSCC, so it "
                         "shouldn't be in the range.");

    // Walk the inner SCCs to update their up-pointer and walk all the edges to
    // update any parent sets.
    // FIXME: We should try to find a way to avoid this (rather expensive) edge
    // walk by updating the parent sets in some other manner.
    for (SCC &InnerC : *RC) {
      InnerC.OuterRefSCC = this;
      SCCIndices[&InnerC] = SCCIndex++;
      for (Node &N : InnerC)
        G->SCCMap[&N] = &InnerC;
    }

    // Now merge in the SCCs. We can actually move here so try to reuse storage
    // the first time through.
    if (MergedSCCs.empty())
      MergedSCCs = std::move(RC->SCCs);
    else
      MergedSCCs.append(RC->SCCs.begin(), RC->SCCs.end());
    RC->SCCs.clear();
    DeletedRefSCCs.push_back(RC);
  }

  // Append our original SCCs to the merged list and move it into place.
  for (SCC &InnerC : *this)
    SCCIndices[&InnerC] = SCCIndex++;
  MergedSCCs.append(SCCs.begin(), SCCs.end());
  SCCs = std::move(MergedSCCs);

  // Remove the merged away RefSCCs from the post order sequence.
  for (RefSCC *RC : MergeRange)
    G->RefSCCIndices.erase(RC);
  int IndexOffset = MergeRange.end() - MergeRange.begin();
  auto EraseEnd =
      G->PostOrderRefSCCs.erase(MergeRange.begin(), MergeRange.end());
  for (RefSCC *RC : make_range(EraseEnd, G->PostOrderRefSCCs.end()))
    G->RefSCCIndices[RC] -= IndexOffset;

  // At this point we have a merged RefSCC with a post-order SCCs list, just
  // connect the nodes to form the new edge.
  SourceN->insertEdgeInternal(TargetN, Edge::Ref);

  // We return the list of RefSCCs which were merged so that callers can
  // invalidate any data they have associated with those RefSCCs. Note that
  // these RefSCCs are no longer in an interesting state (they are totally
  // empty) but the pointers will remain stable for the life of the graph
  // itself.
  return DeletedRefSCCs;
}

void LazyCallGraph::RefSCC::removeOutgoingEdge(Node &SourceN, Node &TargetN) {
  assert(G->lookupRefSCC(SourceN) == this &&
         "The source must be a member of this RefSCC.");
  assert(G->lookupRefSCC(TargetN) != this &&
         "The target must not be a member of this RefSCC");

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and when this
  // routine finishes.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
#endif

  // First remove it from the node.
  bool Removed = SourceN->removeEdgeInternal(TargetN);
  (void)Removed;
  assert(Removed && "Target not in the edge set for this caller?");
}

SmallVector<LazyCallGraph::RefSCC *, 1>
LazyCallGraph::RefSCC::removeInternalRefEdge(Node &SourceN,
                                             ArrayRef<Node *> TargetNs) {
  // We return a list of the resulting *new* RefSCCs in post-order.
  SmallVector<RefSCC *, 1> Result;

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and that either
  // we return an empty list of result RefSCCs and this RefSCC remains valid,
  // or we return new RefSCCs and this RefSCC is dead.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() {
    // If we didn't replace our RefSCC with new ones, check that this one
    // remains valid.
    if (G)
      verify();
  });
#endif

  // First remove the actual edges.
  for (Node *TargetN : TargetNs) {
    assert(!(*SourceN)[*TargetN].isCall() &&
           "Cannot remove a call edge, it must first be made a ref edge");

    bool Removed = SourceN->removeEdgeInternal(*TargetN);
    (void)Removed;
    assert(Removed && "Target not in the edge set for this caller?");
  }

  // Direct self references don't impact the ref graph at all.
  if (llvm::all_of(TargetNs,
                   [&](Node *TargetN) { return &SourceN == TargetN; }))
    return Result;

  // If all targets are in the same SCC as the source, because no call edges
  // were removed there is no RefSCC structure change.
  SCC &SourceC = *G->lookupSCC(SourceN);
  if (llvm::all_of(TargetNs, [&](Node *TargetN) {
        return G->lookupSCC(*TargetN) == &SourceC;
      }))
    return Result;

  // We build somewhat synthetic new RefSCCs by providing a postorder mapping
  // for each inner SCC. We store these inside the low-link field of the nodes
  // rather than associated with SCCs because this saves a round-trip through
  // the node->SCC map and in the common case, SCCs are small. We will verify
  // that we always give the same number to every node in the SCC such that
  // these are equivalent.
  int PostOrderNumber = 0;

  // Reset all the other nodes to prepare for a DFS over them, and add them to
  // our worklist.
  SmallVector<Node *, 8> Worklist;
  for (SCC *C : SCCs) {
    for (Node &N : *C)
      N.DFSNumber = N.LowLink = 0;

    Worklist.append(C->Nodes.begin(), C->Nodes.end());
  }

  // Track the number of nodes in this RefSCC so that we can quickly recognize
  // an important special case of the edge removal not breaking the cycle of
  // this RefSCC.
  const int NumRefSCCNodes = Worklist.size();

  SmallVector<std::pair<Node *, EdgeSequence::iterator>, 4> DFSStack;
  SmallVector<Node *, 4> PendingRefSCCStack;
  do {
    assert(DFSStack.empty() &&
           "Cannot begin a new root with a non-empty DFS stack!");
    assert(PendingRefSCCStack.empty() &&
           "Cannot begin a new root with pending nodes for an SCC!");

    Node *RootN = Worklist.pop_back_val();
    // Skip any nodes we've already reached in the DFS.
    if (RootN->DFSNumber != 0) {
      assert(RootN->DFSNumber == -1 &&
             "Shouldn't have any mid-DFS root nodes!");
      continue;
    }

    RootN->DFSNumber = RootN->LowLink = 1;
    int NextDFSNumber = 2;

    DFSStack.push_back({RootN, (*RootN)->begin()});
    do {
      Node *N;
      EdgeSequence::iterator I;
      std::tie(N, I) = DFSStack.pop_back_val();
      auto E = (*N)->end();

      assert(N->DFSNumber != 0 && "We should always assign a DFS number "
                                  "before processing a node.");

      while (I != E) {
        Node &ChildN = I->getNode();
        if (ChildN.DFSNumber == 0) {
          // Mark that we should start at this child when next this node is the
          // top of the stack. We don't start at the next child to ensure this
          // child's lowlink is reflected.
          DFSStack.push_back({N, I});

          // Continue, resetting to the child node.
          ChildN.LowLink = ChildN.DFSNumber = NextDFSNumber++;
          N = &ChildN;
          I = ChildN->begin();
          E = ChildN->end();
          continue;
        }
        if (ChildN.DFSNumber == -1) {
          // If this child isn't currently in this RefSCC, no need to process
          // it.
          ++I;
          continue;
        }

        // Track the lowest link of the children, if any are still in the
        // stack.
        // Any child not on the stack will have a LowLink of -1.
        assert(ChildN.LowLink != 0 &&
               "Low-link must not be zero with a non-zero DFS number.");
        if (ChildN.LowLink >= 0 && ChildN.LowLink < N->LowLink)
          N->LowLink = ChildN.LowLink;
        ++I;
      }

      // We've finished processing N and its descendants, put it on our pending
      // stack to eventually get merged into a RefSCC.
      PendingRefSCCStack.push_back(N);

      // If this node is linked to some lower entry, continue walking up the
      // stack.
      if (N->LowLink != N->DFSNumber) {
        assert(!DFSStack.empty() &&
               "We never found a viable root for a RefSCC to pop off!");
        continue;
      }

      // Otherwise, form a new RefSCC from the top of the pending node stack.
      int RefSCCNumber = PostOrderNumber++;
      int RootDFSNumber = N->DFSNumber;

      // Find the range of the node stack by walking down until we pass the
      // root DFS number. Update the DFS numbers and low link numbers in the
      // process to avoid re-walking this list where possible.
      auto StackRI = find_if(reverse(PendingRefSCCStack), [&](Node *N) {
        if (N->DFSNumber < RootDFSNumber)
          // We've found the bottom.
          return true;

        // Update this node and keep scanning.
        N->DFSNumber = -1;
        // Save the post-order number in the lowlink field so that we can use
        // it to map SCCs into new RefSCCs after we finish the DFS.
        N->LowLink = RefSCCNumber;
        return false;
      });
      auto RefSCCNodes = make_range(StackRI.base(), PendingRefSCCStack.end());

      // If we find a cycle containing all nodes originally in this RefSCC then
      // the removal hasn't changed the structure at all. This is an important
      // special case and we can directly exit the entire routine more
      // efficiently as soon as we discover it.
      if (llvm::size(RefSCCNodes) == NumRefSCCNodes) {
        // Clear out the low link field as we won't need it.
        for (Node *N : RefSCCNodes)
          N->LowLink = -1;
        // Return the empty result immediately.
        return Result;
      }

      // We've already marked the nodes internally with the RefSCC number so
      // just clear them off the stack and continue.
      PendingRefSCCStack.erase(RefSCCNodes.begin(), PendingRefSCCStack.end());
    } while (!DFSStack.empty());

    assert(DFSStack.empty() && "Didn't flush the entire DFS stack!");
    assert(PendingRefSCCStack.empty() && "Didn't flush all pending nodes!");
  } while (!Worklist.empty());

  assert(PostOrderNumber > 1 &&
         "Should never finish the DFS when the existing RefSCC remains valid!");

  // Otherwise we create a collection of new RefSCC nodes and build
  // a radix-sort style map from postorder number to these new RefSCCs. We then
  // append SCCs to each of these RefSCCs in the order they occurred in the
  // original SCCs container.
  for (int i = 0; i < PostOrderNumber; ++i)
    Result.push_back(G->createRefSCC(*G));

  // Insert the resulting postorder sequence into the global graph postorder
  // sequence before the current RefSCC in that sequence, and then remove the
  // current one.
  //
  // FIXME: It'd be nice to change the APIs so that we returned an iterator
  // range over the global postorder sequence and generally use that sequence
  // rather than building a separate result vector here.
  int Idx = G->getRefSCCIndex(*this);
  G->PostOrderRefSCCs.erase(G->PostOrderRefSCCs.begin() + Idx);
  G->PostOrderRefSCCs.insert(G->PostOrderRefSCCs.begin() + Idx, Result.begin(),
                             Result.end());
  for (int i : seq<int>(Idx, G->PostOrderRefSCCs.size()))
    G->RefSCCIndices[G->PostOrderRefSCCs[i]] = i;

  for (SCC *C : SCCs) {
    // We store the SCC number in the node's low-link field above.
    int SCCNumber = C->begin()->LowLink;
    // Clear out all of the SCC's node's low-link fields now that we're done
    // using them as side-storage.
    for (Node &N : *C) {
      assert(N.LowLink == SCCNumber &&
             "Cannot have different numbers for nodes in the same SCC!");
      N.LowLink = -1;
    }

    RefSCC &RC = *Result[SCCNumber];
    int SCCIndex = RC.SCCs.size();
    RC.SCCs.push_back(C);
    RC.SCCIndices[C] = SCCIndex;
    C->OuterRefSCC = &RC;
  }

  // Now that we've moved things into the new RefSCCs, clear out our current
  // one.
  G = nullptr;
  SCCs.clear();
  SCCIndices.clear();

#ifndef NDEBUG
  // Verify the new RefSCCs we've built.
  for (RefSCC *RC : Result)
    RC->verify();
#endif

  // Return the new list of RefSCCs.
  return Result;
}

void LazyCallGraph::RefSCC::handleTrivialEdgeInsertion(Node &SourceN,
                                                       Node &TargetN) {
  // The only trivial case that requires any graph updates is when we add a new
  // ref edge and may connect different RefSCCs along that path. This is only
  // because of the parents set. Every other part of the graph remains constant
  // after this edge insertion.
  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
  RefSCC &TargetRC = *G->lookupRefSCC(TargetN);
  if (&TargetRC == this)
    return;

#ifdef EXPENSIVE_CHECKS
  assert(TargetRC.isDescendantOf(*this) &&
         "Target must be a descendant of the Source.");
#endif
}

void LazyCallGraph::RefSCC::insertTrivialCallEdge(Node &SourceN,
                                                  Node &TargetN) {
#ifndef NDEBUG
  // Check that the RefSCC is still valid when we finish.
  auto ExitVerifier = make_scope_exit([this] { verify(); });

#ifdef EXPENSIVE_CHECKS
  // Check that we aren't breaking some invariants of the SCC graph. Note that
  // this is quadratic in the number of edges in the call graph!
  SCC &SourceC = *G->lookupSCC(SourceN);
  SCC &TargetC = *G->lookupSCC(TargetN);
  if (&SourceC != &TargetC)
    assert(SourceC.isAncestorOf(TargetC) &&
           "Call edge is not trivial in the SCC graph!");
#endif // EXPENSIVE_CHECKS
#endif // NDEBUG

  // First insert it into the source or find the existing edge.
  auto InsertResult =
      SourceN->EdgeIndexMap.insert({&TargetN, SourceN->Edges.size()});
  if (!InsertResult.second) {
    // Already an edge, just update it.
    Edge &E = SourceN->Edges[InsertResult.first->second];
    if (E.isCall())
      return; // Nothing to do!
    E.setKind(Edge::Call);
  } else {
    // Create the new edge.
    SourceN->Edges.emplace_back(TargetN, Edge::Call);
  }

  // Now that we have the edge, handle the graph fallout.
  handleTrivialEdgeInsertion(SourceN, TargetN);
}

void LazyCallGraph::RefSCC::insertTrivialRefEdge(Node &SourceN, Node &TargetN) {
#ifndef NDEBUG
  // Check that the RefSCC is still valid when we finish.
  auto ExitVerifier = make_scope_exit([this] { verify(); });

#ifdef EXPENSIVE_CHECKS
  // Check that we aren't breaking some invariants of the RefSCC graph.
  RefSCC &SourceRC = *G->lookupRefSCC(SourceN);
  RefSCC &TargetRC = *G->lookupRefSCC(TargetN);
  if (&SourceRC != &TargetRC)
    assert(SourceRC.isAncestorOf(TargetRC) &&
           "Ref edge is not trivial in the RefSCC graph!");
#endif // EXPENSIVE_CHECKS
#endif // NDEBUG

  // First insert it into the source or find the existing edge.
  auto InsertResult =
      SourceN->EdgeIndexMap.insert({&TargetN, SourceN->Edges.size()});
  if (!InsertResult.second)
    // Already an edge, we're done.
    return;

  // Create the new edge.
  SourceN->Edges.emplace_back(TargetN, Edge::Ref);

  // Now that we have the edge, handle the graph fallout.
  handleTrivialEdgeInsertion(SourceN, TargetN);
}

void LazyCallGraph::RefSCC::replaceNodeFunction(Node &N, Function &NewF) {
  Function &OldF = N.getFunction();

#ifndef NDEBUG
  // Check that the RefSCC is still valid when we finish.
  auto ExitVerifier = make_scope_exit([this] { verify(); });

  assert(G->lookupRefSCC(N) == this &&
         "Cannot replace the function of a node outside this RefSCC.");

  assert(G->NodeMap.find(&NewF) == G->NodeMap.end() &&
         "Must not have already walked the new function!");

  // It is important that this replacement not introduce graph changes so we
  // insist that the caller has already removed every use of the original
  // function and that all uses of the new function correspond to existing
  // edges in the graph. The common and expected way to use this is when
  // replacing the function itself in the IR without changing the call graph
  // shape and just updating the analysis based on that.
  assert(&OldF != &NewF && "Cannot replace a function with itself!");
  assert(OldF.use_empty() &&
         "Must have moved all uses from the old function to the new!");
#endif

  N.replaceFunction(NewF);

  // Update various call graph maps.
  G->NodeMap.erase(&OldF);
  G->NodeMap[&NewF] = &N;
}

void LazyCallGraph::insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK) {
  assert(SCCMap.empty() &&
         "This method cannot be called after SCCs have been formed!");

  return SourceN->insertEdgeInternal(TargetN, EK);
}

void LazyCallGraph::removeEdge(Node &SourceN, Node &TargetN) {
  assert(SCCMap.empty() &&
         "This method cannot be called after SCCs have been formed!");

  bool Removed = SourceN->removeEdgeInternal(TargetN);
  (void)Removed;
  assert(Removed && "Target not in the edge set for this caller?");
}

void LazyCallGraph::removeDeadFunction(Function &F) {
  // FIXME: This is unnecessarily restrictive. We should be able to remove
  // functions which recursively call themselves.
  assert(F.use_empty() &&
         "This routine should only be called on trivially dead functions!");

  // We shouldn't remove library functions as they are never really dead while
  // the call graph is in use -- every function definition refers to them.
  assert(!isLibFunction(F) &&
         "Must not remove lib functions from the call graph!");

  auto NI = NodeMap.find(&F);
  if (NI == NodeMap.end())
    // Not in the graph at all!
void LazyCallGraph::removeDeadFunction(Function &F) {
  // FIXME: This is unnecessarily restrictive. We should be able to remove
  // functions which recursively call themselves.
  assert(F.use_empty() &&
         "This routine should only be called on trivially dead functions!");

  // We shouldn't remove library functions as they are never really dead while
  // the call graph is in use -- every function definition refers to them.
  assert(!isLibFunction(F) &&
         "Must not remove lib functions from the call graph!");

  auto NI = NodeMap.find(&F);
  if (NI == NodeMap.end())
    // Not in the graph at all!
    return;

  Node &N = *NI->second;
  NodeMap.erase(NI);

  // Remove this from the entry edges if present.
  EntryEdges.removeEdgeInternal(N);

  if (SCCMap.empty()) {
    // No SCCs have been formed, so removing this is fine and there is nothing
    // else necessary at this point but clearing out the node.
    N.clear();
    return;
  }

  // Cannot remove a function which has yet to be visited in the DFS walk, so
  // if we have a node at all then we must have an SCC and RefSCC.
  auto CI = SCCMap.find(&N);
  assert(CI != SCCMap.end() &&
         "Tried to remove a node without an SCC after DFS walk started!");
  SCC &C = *CI->second;
  SCCMap.erase(CI);
  RefSCC &RC = C.getOuterRefSCC();

  // This node must be the only member of its SCC as it has no callers, and
  // that SCC must be the only member of a RefSCC as it has no references.
  // Validate these properties first.
  assert(C.size() == 1 && "Dead functions must be in a singular SCC");
  assert(RC.size() == 1 && "Dead functions must be in a singular RefSCC");

  auto RCIndexI = RefSCCIndices.find(&RC);
  int RCIndex = RCIndexI->second;
  PostOrderRefSCCs.erase(PostOrderRefSCCs.begin() + RCIndex);
  RefSCCIndices.erase(RCIndexI);
  for (int i = RCIndex, Size = PostOrderRefSCCs.size(); i < Size; ++i)
    RefSCCIndices[PostOrderRefSCCs[i]] = i;

  // Finally clear out all the data structures from the node down through the
  // components.
  N.clear();
  N.G = nullptr;
  N.F = nullptr;
  C.clear();
  RC.clear();
  RC.G = nullptr;

  // Nothing to delete as all the objects are allocated in stable bump pointer
  // allocators.
}

LazyCallGraph::Node &LazyCallGraph::insertInto(Function &F, Node *&MappedN) {
  return *new (MappedN = BPA.Allocate()) Node(*this, F);
}

void LazyCallGraph::updateGraphPtrs() {
  // Walk the node map to update the nodes' graph pointers. While this
  // iterates in an unstable order, the order has no effect, so it remains
  // correct.
  for (auto &FunctionNodePair : NodeMap)
    FunctionNodePair.second->G = this;

  for (auto *RC : PostOrderRefSCCs)
    RC->G = this;
}
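/// Shared implementation of the SCC-forming DFS used below.
///
/// This runs an iterative, Tarjan-style strongly-connected-component walk
/// over the nodes in \p Roots. Edge traversal is abstracted through
/// \p GetBegin, \p GetEnd, and \p GetNode so that the same walk can form call
/// SCCs (over call edges only, see buildSCCs) and RefSCCs (over all edges,
/// see buildRefSCCs). Each component found is handed to \p FormSCC as a
/// node_stack_range, in post-order of the DFS.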
template <typename RootsT, typename GetBeginT, typename GetEndT,
          typename GetNodeT, typename FormSCCCallbackT>
void LazyCallGraph::buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
                                     GetEndT &&GetEnd, GetNodeT &&GetNode,
                                     FormSCCCallbackT &&FormSCC) {
  using EdgeItT = decltype(GetBegin(std::declval<Node &>()));

  SmallVector<std::pair<Node *, EdgeItT>, 16> DFSStack;
  SmallVector<Node *, 16> PendingSCCStack;

  // Scan down the stack and DFS across the call edges.
  for (Node *RootN : Roots) {
    assert(DFSStack.empty() &&
           "Cannot begin a new root with a non-empty DFS stack!");
    assert(PendingSCCStack.empty() &&
           "Cannot begin a new root with pending nodes for an SCC!");

    // Skip any nodes we've already reached in the DFS.
    if (RootN->DFSNumber != 0) {
      assert(RootN->DFSNumber == -1 &&
             "Shouldn't have any mid-DFS root nodes!");
      continue;
    }

    RootN->DFSNumber = RootN->LowLink = 1;
    int NextDFSNumber = 2;

    DFSStack.push_back({RootN, GetBegin(*RootN)});
    do {
      Node *N;
      EdgeItT I;
      std::tie(N, I) = DFSStack.pop_back_val();
      auto E = GetEnd(*N);
      while (I != E) {
        Node &ChildN = GetNode(I);
        if (ChildN.DFSNumber == 0) {
          // We haven't yet visited this child, so descend, pushing the
          // current node onto the stack.
          DFSStack.push_back({N, I});

          ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
          N = &ChildN;
          I = GetBegin(*N);
          E = GetEnd(*N);
          continue;
        }

        // If the child has already been added to some child component, it
        // couldn't impact the low-link of this parent because it isn't
        // connected, and thus its low-link isn't relevant so skip it.
        if (ChildN.DFSNumber == -1) {
          ++I;
          continue;
        }

        // Track the lowest linked child as the lowest link for this node.
        assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
        if (ChildN.LowLink < N->LowLink)
          N->LowLink = ChildN.LowLink;

        // Move to the next edge.
        ++I;
      }

      // We've finished processing N and its descendants, put it on our
      // pending SCC stack to eventually get merged into an SCC of nodes.
      PendingSCCStack.push_back(N);

      // If this node is linked to some lower entry, continue walking up the
      // stack.
      if (N->LowLink != N->DFSNumber)
        continue;

      // Otherwise, we've completed an SCC. Append it to our post order list
      // of SCCs.
      int RootDFSNumber = N->DFSNumber;
      // Find the range of the node stack by walking down until we pass the
      // root DFS number.
      auto SCCNodes = make_range(
          PendingSCCStack.rbegin(),
          find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
            return N->DFSNumber < RootDFSNumber;
          }));
      // Form a new SCC out of these nodes and then clear them off our pending
      // stack.
      FormSCC(SCCNodes);
      PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
    } while (!DFSStack.empty());
  }
}
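// A note on the DFS bookkeeping shared with the callers below: a DFSNumber /
// LowLink of zero means "not yet visited", a positive value means "currently
// on the DFS or pending stacks", and -1 means "already placed into a formed
// component". buildSCCs resets the fields to zero before its inner walk and
// to -1 once nodes land in an SCC, which is what makes reusing the same
// fields for both walks safe.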
/// Build the internal SCCs for a RefSCC from a sequence of nodes.
///
/// Appends the SCCs to the provided vector and updates the map with their
/// indices. Both the vector and map must be empty when passed into this
/// routine.
void LazyCallGraph::buildSCCs(RefSCC &RC, node_stack_range Nodes) {
  assert(RC.SCCs.empty() && "Already built SCCs!");
  assert(RC.SCCIndices.empty() && "Already mapped SCC indices!");

  for (Node *N : Nodes) {
    assert(N->LowLink >= (*Nodes.begin())->LowLink &&
           "We cannot have a low link in an SCC lower than its root on the "
           "stack!");

    // This node will go into the next RefSCC, clear out its DFS and low link
    // as we scan.
    N->DFSNumber = N->LowLink = 0;
  }

  // Each RefSCC contains a DAG of the call SCCs. To build these, we do
  // a direct walk of the call edges using Tarjan's algorithm. We reuse the
  // internal storage as we won't need it for the outer graph's DFS any longer.
  buildGenericSCCs(
      Nodes, [](Node &N) { return N->call_begin(); },
      [](Node &N) { return N->call_end(); },
      [](EdgeSequence::call_iterator I) -> Node & { return I->getNode(); },
      [this, &RC](node_stack_range Nodes) {
        RC.SCCs.push_back(createSCC(RC, Nodes));
        for (Node &N : *RC.SCCs.back()) {
          N.DFSNumber = N.LowLink = -1;
          SCCMap[&N] = RC.SCCs.back();
        }
      });

  // Wire up the SCC indices.
  for (int i = 0, Size = RC.SCCs.size(); i < Size; ++i)
    RC.SCCIndices[RC.SCCs[i]] = i;
}

void LazyCallGraph::buildRefSCCs() {
  if (EntryEdges.empty() || !PostOrderRefSCCs.empty())
    // RefSCCs are either non-existent or already built!
    return;

  assert(RefSCCIndices.empty() && "Already mapped RefSCC indices!");

  SmallVector<Node *, 16> Roots;
  for (Edge &E : *this)
    Roots.push_back(&E.getNode());

  // The roots will be popped off a stack, so use reverse to get a less
  // surprising order. This doesn't change any of the semantics anywhere.
  std::reverse(Roots.begin(), Roots.end());

  buildGenericSCCs(
      Roots,
      [](Node &N) {
        // We need to populate each node as we begin to walk its edges.
        N.populate();
        return N->begin();
      },
      [](Node &N) { return N->end(); },
      [](EdgeSequence::iterator I) -> Node & { return I->getNode(); },
      [this](node_stack_range Nodes) {
        RefSCC *NewRC = createRefSCC(*this);
        buildSCCs(*NewRC, Nodes);

        // Push the new RefSCC into the postorder list and remember its
        // position in the index map.
        bool Inserted =
            RefSCCIndices.insert({NewRC, PostOrderRefSCCs.size()}).second;
        (void)Inserted;
        assert(Inserted && "Cannot already have this RefSCC in the index map!");
        PostOrderRefSCCs.push_back(NewRC);
#ifndef NDEBUG
        NewRC->verify();
#endif
      });
}
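// For illustration only (assumed names): a typical consumer forces just the
// parts of the graph it needs, mirroring the printer pass below:
//
//   LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);
//   CG.buildRefSCCs();
//   for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
//     for (LazyCallGraph::SCC &C : RC)
//       for (LazyCallGraph::Node &N : C)
//         visit(N.getFunction()); // "visit" is a placeholder.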
"call" : "ref ") << " -> " 1735 << E.getFunction().getName() << "\n"; 1736 1737 OS << "\n"; 1738 } 1739 1740 static void printSCC(raw_ostream &OS, LazyCallGraph::SCC &C) { 1741 ptrdiff_t Size = size(C); 1742 OS << " SCC with " << Size << " functions:\n"; 1743 1744 for (LazyCallGraph::Node &N : C) 1745 OS << " " << N.getFunction().getName() << "\n"; 1746 } 1747 1748 static void printRefSCC(raw_ostream &OS, LazyCallGraph::RefSCC &C) { 1749 ptrdiff_t Size = size(C); 1750 OS << " RefSCC with " << Size << " call SCCs:\n"; 1751 1752 for (LazyCallGraph::SCC &InnerC : C) 1753 printSCC(OS, InnerC); 1754 1755 OS << "\n"; 1756 } 1757 1758 PreservedAnalyses LazyCallGraphPrinterPass::run(Module &M, 1759 ModuleAnalysisManager &AM) { 1760 LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M); 1761 1762 OS << "Printing the call graph for module: " << M.getModuleIdentifier() 1763 << "\n\n"; 1764 1765 for (Function &F : M) 1766 printNode(OS, G.get(F)); 1767 1768 G.buildRefSCCs(); 1769 for (LazyCallGraph::RefSCC &C : G.postorder_ref_sccs()) 1770 printRefSCC(OS, C); 1771 1772 return PreservedAnalyses::all(); 1773 } 1774 1775 LazyCallGraphDOTPrinterPass::LazyCallGraphDOTPrinterPass(raw_ostream &OS) 1776 : OS(OS) {} 1777 1778 static void printNodeDOT(raw_ostream &OS, LazyCallGraph::Node &N) { 1779 std::string Name = "\"" + DOT::EscapeString(N.getFunction().getName()) + "\""; 1780 1781 for (LazyCallGraph::Edge &E : N.populate()) { 1782 OS << " " << Name << " -> \"" 1783 << DOT::EscapeString(E.getFunction().getName()) << "\""; 1784 if (!E.isCall()) // It is a ref edge. 1785 OS << " [style=dashed,label=\"ref\"]"; 1786 OS << ";\n"; 1787 } 1788 1789 OS << "\n"; 1790 } 1791 1792 PreservedAnalyses LazyCallGraphDOTPrinterPass::run(Module &M, 1793 ModuleAnalysisManager &AM) { 1794 LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M); 1795 1796 OS << "digraph \"" << DOT::EscapeString(M.getModuleIdentifier()) << "\" {\n"; 1797 1798 for (Function &F : M) 1799 printNodeDOT(OS, G.get(F)); 1800 1801 OS << "}\n"; 1802 1803 return PreservedAnalyses::all(); 1804 } 1805