1 //===- LazyCallGraph.h - Analysis of a Module's call graph ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 ///
10 /// Implements a lazy call graph analysis and related passes for the new pass
11 /// manager.
12 ///
13 /// NB: This is *not* a traditional call graph! It is a graph which models both
14 /// the current calls and potential calls. As a consequence there are many
15 /// edges in this call graph that do not correspond to a 'call' or 'invoke'
16 /// instruction.
17 ///
18 /// The primary use case of this graph analysis is to facilitate iterating
19 /// across the functions of a module in ways that ensure all callees are
20 /// visited prior to a caller (given any SCC constraints), or vice versa. As
21 /// such it is particularly well suited to organizing CGSCC optimizations such
22 /// as inlining, outlining, argument promotion, etc. That is its primary use
23 /// case and motivates the design. It may not be appropriate for other
24 /// purposes. The use graph of functions or some other conservative analysis of
25 /// call instructions may be interesting for optimizations and subsequent
26 /// analyses which don't work in the context of an overly specified
27 /// potential-call-edge graph.
28 ///
29 /// To understand the specific rules and nature of this call graph analysis,
30 /// see the documentation of the \c LazyCallGraph below.
31 ///
32 //===----------------------------------------------------------------------===//
33
34 #ifndef LLVM_ANALYSIS_LAZYCALLGRAPH_H
35 #define LLVM_ANALYSIS_LAZYCALLGRAPH_H
36
37 #include "llvm/ADT/ArrayRef.h"
38 #include "llvm/ADT/DenseMap.h"
39 #include "llvm/ADT/Optional.h"
40 #include "llvm/ADT/PointerIntPair.h"
41 #include "llvm/ADT/STLExtras.h"
42 #include "llvm/ADT/SetVector.h"
43 #include "llvm/ADT/SmallPtrSet.h"
44 #include "llvm/ADT/SmallVector.h"
45 #include "llvm/ADT/StringRef.h"
46 #include "llvm/ADT/iterator.h"
47 #include "llvm/ADT/iterator_range.h"
48 #include "llvm/Analysis/TargetLibraryInfo.h"
49 #include "llvm/IR/Constant.h"
50 #include "llvm/IR/Constants.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/PassManager.h"
53 #include "llvm/Support/Allocator.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include <cassert>
57 #include <iterator>
58 #include <string>
59 #include <utility>
60
61 namespace llvm {
62
63 template <class GraphType> struct GraphTraits;
64 class Module;
65 class Value;
66
67 /// A lazily constructed view of the call graph of a module.
68 ///
69 /// With the edges of this graph, the motivating constraint that we are
70 /// attempting to maintain is that function-local optimization, CGSCC-local
71 /// optimizations, and optimizations transforming a pair of functions connected
72 /// by an edge in the graph, do not invalidate a bottom-up traversal of the SCC
73 /// DAG. That is, no optimizations will delete, remove, or add an edge such
74 /// that functions already visited in a bottom-up order of the SCC DAG are no
75 /// longer valid to have visited, or such that functions not yet visited in
76 /// a bottom-up order of the SCC DAG are not required to have already been
77 /// visited.
78 ///
79 /// Within this constraint, the desire is to minimize the merge points of the
80 /// SCC DAG. The greater the fanout of the SCC DAG and the fewer merge points
81 /// in the SCC DAG, the more independence there is in optimizing within it.
82 /// There is a strong desire to enable parallelization of optimizations over
83 /// the call graph, and both limited fanout and merge points will (artificially
84 /// in some cases) limit the scaling of such an effort.
85 ///
86 /// To this end, the graph represents both direct calls and any potential resolution to
87 /// an indirect call edge. Another way to think about it is that it represents
88 /// both the direct call edges and any direct call edges that might be formed
89 /// through static optimizations. Specifically, it considers taking the address
90 /// of a function to be an edge in the call graph because this might be
91 /// forwarded to become a direct call by some subsequent function-local
92 /// optimization. The result is that the graph closely follows the use-def
93 /// edges for functions. Walking "up" the graph can be done by looking at all
94 /// of the uses of a function.
95 ///
96 /// The roots of the call graph are the external functions and functions
97 /// escaped into global variables. Those functions can be called from outside
98 /// of the module or via unknowable means in the IR -- we may not be able to
99 /// form even a potential call edge from a function body which may dynamically
100 /// load the function and call it.
101 ///
102 /// This analysis still requires updates to remain valid after optimizations
103 /// which could potentially change the set of potential callees. The
104 /// constraints it operates under only make the traversal order remain valid.
105 ///
106 /// The entire analysis must be re-computed if full interprocedural
107 /// optimizations run at any point. For example, globalopt completely
108 /// invalidates the information in this analysis.
109 ///
110 /// FIXME: This class is named LazyCallGraph in a lame attempt to distinguish
111 /// it from the existing CallGraph. At some point, it is expected that this
112 /// will be the only call graph and it will be renamed accordingly.
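///
/// A minimal usage sketch, assuming an already constructed graph \c CG for
/// the module of interest: build the RefSCC postorder and then walk every
/// function bottom-up.
///
/// \code
///   CG.buildRefSCCs();
///   for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
///     for (LazyCallGraph::SCC &C : RC)
///       for (LazyCallGraph::Node &N : C)
///         visit(N.getFunction()); // `visit` is a hypothetical callback.
/// \endcode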
113 class LazyCallGraph {
114 public:
115 class Node;
116 class EdgeSequence;
117 class SCC;
118 class RefSCC;
119
120 /// A class used to represent edges in the call graph.
121 ///
122 /// The lazy call graph models both *call* edges and *reference* edges. Call
123 /// edges are much what you would expect, and exist when there is a 'call' or
124 /// 'invoke' instruction of some function. Reference edges are also tracked
125 /// alongside these, and exist whenever any instruction (transitively
126 /// through its operands) references a function. All call edges are
127 /// inherently reference edges, and so the reference graph forms a superset
128 /// of the formal call graph.
129 ///
130 /// All of these forms of edges are fundamentally represented as outgoing
131 /// edges. The edges are stored in the source node and point at the target
132 /// node. This allows the edge structure itself to be a very compact data
133 /// structure: essentially a tagged pointer.
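///
/// A small, hedged sketch of inspecting an edge \c E taken from a populated
/// \c EdgeSequence (the sequence itself is an assumption here):
///
/// \code
///   if (E) {                          // Skip null (deleted) edges.
///     Function &Callee = E.getFunction();
///     if (E.isCall())                 // False for pure reference edges.
///       noteDirectCall(Callee);       // `noteDirectCall` is hypothetical.
///   }
/// \endcode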
134 class Edge {
135 public:
136 /// The kind of edge in the graph.
137 enum Kind : bool { Ref = false, Call = true };
138
139 Edge();
140 explicit Edge(Node &N, Kind K);
141
142 /// Test whether the edge is null.
143 ///
144 /// This happens when an edge has been deleted. We leave the edge objects
145 /// around but clear them.
146 explicit operator bool() const;
147
148 /// Returns the \c Kind of the edge.
149 Kind getKind() const;
150
151 /// Test whether the edge represents a direct call to a function.
152 ///
153 /// This requires that the edge is not null.
154 bool isCall() const;
155
156 /// Get the call graph node referenced by this edge.
157 ///
158 /// This requires that the edge is not null.
159 Node &getNode() const;
160
161 /// Get the function referenced by this edge.
162 ///
163 /// This requires that the edge is not null.
164 Function &getFunction() const;
165
166 private:
167 friend class LazyCallGraph::EdgeSequence;
168 friend class LazyCallGraph::RefSCC;
169
170 PointerIntPair<Node *, 1, Kind> Value;
171
172 void setKind(Kind K) { Value.setInt(K); }
173 };
174
175 /// The edge sequence object.
176 ///
177 /// This typically exists entirely within the node but is exposed as
178 /// a separate type because a node doesn't initially have edges. An explicit
179 /// population step is required to produce this sequence at first and it is
180 /// then cached in the node. It is also used to represent edges entering the
181 /// graph from outside the module to model the graph's roots.
182 ///
183 /// The sequence itself is both iterable and indexable. The indexes remain
184 /// stable even as the sequence mutates (including removal).
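///
/// A minimal sketch of iterating a populated sequence \c Edges (assumed to
/// have been obtained via \c Node::populate()):
///
/// \code
///   for (LazyCallGraph::Edge &E : Edges)         // Ref and call edges.
///     visitReferencedNode(E.getNode());          // Hypothetical helper.
///   for (LazyCallGraph::Edge &E : Edges.calls()) // Call edges only.
///     visitCalledNode(E.getNode());              // Hypothetical helper.
/// \endcode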
185 class EdgeSequence {
186 friend class LazyCallGraph;
187 friend class LazyCallGraph::Node;
188 friend class LazyCallGraph::RefSCC;
189
190 using VectorT = SmallVector<Edge, 4>;
191 using VectorImplT = SmallVectorImpl<Edge>;
192
193 public:
194 /// An iterator used for the edges to both entry nodes and child nodes.
195 class iterator
196 : public iterator_adaptor_base<iterator, VectorImplT::iterator,
197 std::forward_iterator_tag> {
198 friend class LazyCallGraph;
199 friend class LazyCallGraph::Node;
200
201 VectorImplT::iterator E;
202
203 // Build the iterator for a specific position in the edge list.
204 iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
205 : iterator_adaptor_base(BaseI), E(E) {
206 while (I != E && !*I)
207 ++I;
208 }
209
210 public:
211 iterator() = default;
212
213 using iterator_adaptor_base::operator++;
214 iterator &operator++() {
215 do {
216 ++I;
217 } while (I != E && !*I);
218 return *this;
219 }
220 };
221
222 /// An iterator over specifically call edges.
223 ///
224 /// This has the same iteration properties as the \c iterator, but
225 /// restricts itself to edges which represent actual calls.
226 class call_iterator
227 : public iterator_adaptor_base<call_iterator, VectorImplT::iterator,
228 std::forward_iterator_tag> {
229 friend class LazyCallGraph;
230 friend class LazyCallGraph::Node;
231
232 VectorImplT::iterator E;
233
234 /// Advance the iterator to the next valid, call edge.
235 void advanceToNextEdge() {
236 while (I != E && (!*I || !I->isCall()))
237 ++I;
238 }
239
240 // Build the iterator for a specific position in the edge list.
241 call_iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
242 : iterator_adaptor_base(BaseI), E(E) {
243 advanceToNextEdge();
244 }
245
246 public:
247 call_iterator() = default;
248
249 using iterator_adaptor_base::operator++;
250 call_iterator &operator++() {
251 ++I;
252 advanceToNextEdge();
253 return *this;
254 }
255 };
256
257 iterator begin() { return iterator(Edges.begin(), Edges.end()); }
258 iterator end() { return iterator(Edges.end(), Edges.end()); }
259
260 Edge &operator[](Node &N) {
261 assert(EdgeIndexMap.find(&N) != EdgeIndexMap.end() && "No such edge!");
262 auto &E = Edges[EdgeIndexMap.find(&N)->second];
263 assert(E && "Dead or null edge!");
264 return E;
265 }
266
267 Edge *lookup(Node &N) {
268 auto EI = EdgeIndexMap.find(&N);
269 if (EI == EdgeIndexMap.end())
270 return nullptr;
271 auto &E = Edges[EI->second];
272 return E ? &E : nullptr;
273 }
274
275 call_iterator call_begin() {
276 return call_iterator(Edges.begin(), Edges.end());
277 }
278 call_iterator call_end() { return call_iterator(Edges.end(), Edges.end()); }
279
280 iterator_range<call_iterator> calls() {
281 return make_range(call_begin(), call_end());
282 }
283
284 bool empty() {
285 for (auto &E : Edges)
286 if (E)
287 return false;
288
289 return true;
290 }
291
292 private:
293 VectorT Edges;
294 DenseMap<Node *, int> EdgeIndexMap;
295
296 EdgeSequence() = default;
297
298 /// Internal helper to insert an edge to a node.
299 void insertEdgeInternal(Node &ChildN, Edge::Kind EK);
300
301 /// Internal helper to change an edge kind.
302 void setEdgeKind(Node &ChildN, Edge::Kind EK);
303
304 /// Internal helper to remove the edge to the given function.
305 bool removeEdgeInternal(Node &ChildN);
306 };
307
308 /// A node in the call graph.
309 ///
310 /// This represents a single node. Its primary roles are to cache the list of
311 /// callees, de-duplicate and provide fast testing of whether a function is
312 /// a callee, and facilitate iteration of child nodes in the graph.
313 ///
314 /// The node works much like an optional in order to lazily populate the
315 /// edges of each node. Until populated, there are no edges. Once populated,
316 /// you can access the edges by dereferencing the node or using the `->`
317 /// operator as if the node was an `Optional<EdgeSequence>`.
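///
/// A hedged sketch of the lazy population pattern, assuming \c N is a node
/// obtained from \c LazyCallGraph::get():
///
/// \code
///   LazyCallGraph::EdgeSequence &Edges = N.populate(); // Scans F once.
///   for (LazyCallGraph::Edge &E : Edges)
///     if (E.isCall())
///       handleCallee(E.getFunction()); // `handleCallee` is hypothetical.
/// \endcode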
318 class Node {
319 friend class LazyCallGraph;
320 friend class LazyCallGraph::RefSCC;
321
322 public:
323 LazyCallGraph &getGraph() const { return *G; }
324
325 Function &getFunction() const { return *F; }
326
327 StringRef getName() const { return F->getName(); }
328
329 /// Equality is defined as address equality.
330 bool operator==(const Node &N) const { return this == &N; }
331 bool operator!=(const Node &N) const { return !operator==(N); }
332
333 /// Tests whether the node has been populated with edges.
334 bool isPopulated() const { return Edges.hasValue(); }
335
336 /// Tests whether this is actually a dead node and no longer valid.
337 ///
338 /// Users rarely interact with nodes in this state and other methods are
339 /// invalid. This is used to model a node in an edge list where the
340 /// function has been completely removed.
341 bool isDead() const {
342 assert(!G == !F &&
343 "Both graph and function pointers should be null or non-null.");
344 return !G;
345 }
346
347 // We allow accessing the edges by dereferencing or using the arrow
348 // operator, essentially wrapping the internal optional.
349 EdgeSequence &operator*() const {
350 // Rip const off because the node itself isn't changing here.
351 return const_cast<EdgeSequence &>(*Edges);
352 }
353 EdgeSequence *operator->() const { return &**this; }
354
355 /// Populate the edges of this node if necessary.
356 ///
357 /// The first time this is called it will populate the edges for this node
358 /// in the graph. It does this by scanning the underlying function, so once
359 /// this is done, any changes to that function must be explicitly reflected
360 /// in updates to the graph.
361 ///
362 /// \returns the populated \c EdgeSequence to simplify walking it.
363 ///
364 /// This will not update or re-scan anything if called repeatedly. Instead,
365 /// the edge sequence is cached and returned immediately on subsequent
366 /// calls.
367 EdgeSequence &populate() {
368 if (Edges)
369 return *Edges;
370
371 return populateSlow();
372 }
373
374 private:
375 LazyCallGraph *G;
376 Function *F;
377
378 // We provide for the DFS numbering and Tarjan walk lowlink numbers to be
379 // stored directly within the node. These are both '-1' when nodes are part
380 // of an SCC (or RefSCC), or '0' when not yet reached in a DFS walk.
381 int DFSNumber = 0;
382 int LowLink = 0;
383
384 Optional<EdgeSequence> Edges;
385
386 /// Basic constructor implements the scanning of F into Edges and
387 /// EdgeIndexMap.
388 Node(LazyCallGraph &G, Function &F) : G(&G), F(&F) {}
389
390 /// Implementation of the scan when populating.
391 EdgeSequence &populateSlow();
392
393 /// Internal helper to directly replace the function with a new one.
394 ///
395 /// This is used to facilitate transformations which need to replace the
396 /// formal Function object but directly move the body and users from one to
397 /// the other.
398 void replaceFunction(Function &NewF);
399
400 void clear() { Edges.reset(); }
401
402 /// Print the name of this node's function.
403 friend raw_ostream &operator<<(raw_ostream &OS, const Node &N) {
404 return OS << N.F->getName();
405 }
406
407 /// Dump the name of this node's function to stderr.
408 void dump() const;
409 };
410
411 /// An SCC of the call graph.
412 ///
413 /// This represents a Strongly Connected Component of the direct call graph
414 /// -- ignoring indirect calls and function references. It stores this as
415 /// a collection of call graph nodes. While the order of nodes in the SCC is
416 /// stable, it is not any particular order.
417 ///
418 /// The SCCs are nested within a \c RefSCC, see below for details about that
419 /// outer structure. SCCs do not support mutation of the call graph; that
420 /// must be done through the containing \c RefSCC in order to fully reason
421 /// about the ordering and connections of the graph.
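///
/// A hedged sketch: given a node \c N in an already formed graph \c CG
/// (both assumptions), find its SCC, its enclosing RefSCC, and walk its
/// members.
///
/// \code
///   if (LazyCallGraph::SCC *C = CG.lookupSCC(N)) {
///     LazyCallGraph::RefSCC &RC = C->getOuterRefSCC();
///     assert(&RC == CG.lookupRefSCC(N) && "Lookups should agree.");
///     for (LazyCallGraph::Node &Member : *C)
///       examine(Member.getFunction()); // `examine` is hypothetical.
///   }
/// \endcode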
422 class LLVM_EXTERNAL_VISIBILITY SCC {
423 friend class LazyCallGraph;
424 friend class LazyCallGraph::Node;
425
426 RefSCC *OuterRefSCC;
427 SmallVector<Node *, 1> Nodes;
428
429 template <typename NodeRangeT>
430 SCC(RefSCC &OuterRefSCC, NodeRangeT &&Nodes)
431 : OuterRefSCC(&OuterRefSCC), Nodes(std::forward<NodeRangeT>(Nodes)) {}
432
433 void clear() {
434 OuterRefSCC = nullptr;
435 Nodes.clear();
436 }
437
438 /// Print a short description useful for debugging or logging.
439 ///
440 /// We print the function names in the SCC wrapped in '()'s and skipping
441 /// the middle functions if there are a large number.
442 //
443 // Note: this is defined inline to dodge issues with GCC's interpretation
444 // of enclosing namespaces for friend function declarations.
445 friend raw_ostream &operator<<(raw_ostream &OS, const SCC &C) {
446 OS << '(';
447 int i = 0;
448 for (LazyCallGraph::Node &N : C) {
449 if (i > 0)
450 OS << ", ";
451 // Elide the inner elements if there are too many.
452 if (i > 8) {
453 OS << "..., " << *C.Nodes.back();
454 break;
455 }
456 OS << N;
457 ++i;
458 }
459 OS << ')';
460 return OS;
461 }
462
463 /// Dump a short description of this SCC to stderr.
464 void dump() const;
465
466 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
467 /// Verify invariants about the SCC.
468 ///
469 /// This will attempt to validate all of the basic invariants within an
470 /// SCC, but not that it is a strongly connected component per-se. Primarily
471 /// useful while building and updating the graph to check that basic
472 /// properties are in place rather than having inexplicable crashes later.
473 void verify();
474 #endif
475
476 public:
477 using iterator = pointee_iterator<SmallVectorImpl<Node *>::const_iterator>;
478
479 iterator begin() const { return Nodes.begin(); }
480 iterator end() const { return Nodes.end(); }
481
482 int size() const { return Nodes.size(); }
483
484 RefSCC &getOuterRefSCC() const { return *OuterRefSCC; }
485
486 /// Test if this SCC is a parent of \a C.
487 ///
488 /// Note that this is linear in the number of edges departing the current
489 /// SCC.
490 bool isParentOf(const SCC &C) const;
491
492 /// Test if this SCC is an ancestor of \a C.
493 ///
494 /// Note that in the worst case this is linear in the number of edges
495 /// departing the current SCC and every SCC in the entire graph reachable
496 /// from this SCC. Thus this very well may walk every edge in the entire
497 /// call graph! Do not call this in a tight loop!
498 bool isAncestorOf(const SCC &C) const;
499
500 /// Test if this SCC is a child of \a C.
501 ///
502 /// See the comments for \c isParentOf for detailed notes about the
503 /// complexity of this routine.
504 bool isChildOf(const SCC &C) const { return C.isParentOf(*this); }
505
506 /// Test if this SCC is a descendant of \a C.
507 ///
508 /// See the comments for \c isParentOf for detailed notes about the
509 /// complexity of this routine.
510 bool isDescendantOf(const SCC &C) const { return C.isAncestorOf(*this); }
511
512 /// Provide a short name by printing this SCC to a std::string.
513 ///
514 /// This copes with the fact that we don't have a name per-se for an SCC
515 /// while still making the use of this in debugging and logging useful.
516 std::string getName() const {
517 std::string Name;
518 raw_string_ostream OS(Name);
519 OS << *this;
520 OS.flush();
521 return Name;
522 }
523 };
524
525 /// A RefSCC of the call graph.
526 ///
527 /// This models a Strongly Connected Component of function reference edges in
528 /// the call graph. As opposed to actual SCCs, these can be used to scope
529 /// subgraphs of the module which are independent from other subgraphs of the
530 /// module because they do not reference it in any way. This is also the unit
531 /// where we do mutation of the graph in order to restrict mutations to those
532 /// which don't violate this independence.
533 ///
534 /// A RefSCC contains a DAG of actual SCCs. All the nodes within the RefSCC
535 /// are necessarily within some actual SCC that nests within it. Since
536 /// a direct call *is* a reference, there will always be at least one RefSCC
537 /// around any SCC.
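///
/// A minimal sketch of walking the SCCs nested inside a RefSCC \c RC
/// (assumed to come from the graph's postorder RefSCC walk); the inner SCCs
/// are themselves visited in a valid postorder:
///
/// \code
///   for (LazyCallGraph::SCC &C : RC)
///     runOnSCC(C); // `runOnSCC` is a hypothetical per-SCC action.
/// \endcode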
538 class RefSCC {
539 friend class LazyCallGraph;
540 friend class LazyCallGraph::Node;
541
542 LazyCallGraph *G;
543
544 /// A postorder list of the inner SCCs.
545 SmallVector<SCC *, 4> SCCs;
546
547 /// A map from SCC to index in the postorder list.
548 SmallDenseMap<SCC *, int, 4> SCCIndices;
549
550 /// Fast-path constructor. RefSCCs should instead be constructed by calling
551 /// formRefSCCFast on the graph itself.
552 RefSCC(LazyCallGraph &G);
553
554 void clear() {
555 SCCs.clear();
556 SCCIndices.clear();
557 }
558
559 /// Print a short description useful for debugging or logging.
560 ///
561 /// We print the SCCs wrapped in '[]'s and skipping the middle SCCs if
562 /// there are a large number.
563 //
564 // Note: this is defined inline to dodge issues with GCC's interpretation
565 // of enclosing namespaces for friend function declarations.
566 friend raw_ostream &operator<<(raw_ostream &OS, const RefSCC &RC) {
567 OS << '[';
568 int i = 0;
569 for (LazyCallGraph::SCC &C : RC) {
570 if (i > 0)
571 OS << ", ";
572 // Elide the inner elements if there are too many.
573 if (i > 4) {
574 OS << "..., " << *RC.SCCs.back();
575 break;
576 }
577 OS << C;
578 ++i;
579 }
580 OS << ']';
581 return OS;
582 }
583
584 /// Dump a short description of this RefSCC to stderr.
585 void dump() const;
586
587 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
588 /// Verify invariants about the RefSCC and all its SCCs.
589 ///
590 /// This will attempt to validate all of the invariants *within* the
591 /// RefSCC, but not that it is a strongly connected component of the larger
592 /// graph. This makes it useful even when partially through an update.
593 ///
594 /// Invariants checked:
595 /// - SCCs and their indices match.
596 /// - The SCCs list is in fact in post-order.
597 void verify();
598 #endif
599
600 public:
601 using iterator = pointee_iterator<SmallVectorImpl<SCC *>::const_iterator>;
602 using range = iterator_range<iterator>;
603 using parent_iterator =
604 pointee_iterator<SmallPtrSetImpl<RefSCC *>::const_iterator>;
605
606 iterator begin() const { return SCCs.begin(); }
607 iterator end() const { return SCCs.end(); }
608
609 ssize_t size() const { return SCCs.size(); }
610
611 SCC &operator[](int Idx) { return *SCCs[Idx]; }
612
613 iterator find(SCC &C) const {
614 return SCCs.begin() + SCCIndices.find(&C)->second;
615 }
616
617 /// Test if this RefSCC is a parent of \a RC.
618 ///
619 /// CAUTION: This method walks every edge in the \c RefSCC, it can be very
620 /// expensive.
621 bool isParentOf(const RefSCC &RC) const;
622
623 /// Test if this RefSCC is an ancestor of \a RC.
624 ///
625 /// CAUTION: This method walks the directed graph of edges as far as
626 /// necessary to find a possible path to the argument. In the worst case
627 /// this may walk the entire graph and can be extremely expensive.
628 bool isAncestorOf(const RefSCC &RC) const;
629
630 /// Test if this RefSCC is a child of \a RC.
631 ///
632 /// CAUTION: This method walks every edge in the argument \c RefSCC, it can
633 /// be very expensive.
634 bool isChildOf(const RefSCC &RC) const { return RC.isParentOf(*this); }
635
636 /// Test if this RefSCC is a descendant of \a RC.
637 ///
638 /// CAUTION: This method walks the directed graph of edges as far as
639 /// necessary to find a possible path from the argument. In the worst case
640 /// this may walk the entire graph and can be extremely expensive.
641 bool isDescendantOf(const RefSCC &RC) const {
642 return RC.isAncestorOf(*this);
643 }
644
645 /// Provide a short name by printing this RefSCC to a std::string.
646 ///
647 /// This copes with the fact that we don't have a name per-se for a RefSCC
648 /// while still making the use of this in debugging and logging useful.
649 std::string getName() const {
650 std::string Name;
651 raw_string_ostream OS(Name);
652 OS << *this;
653 OS.flush();
654 return Name;
655 }
656
657 ///@{
658 /// \name Mutation API
659 ///
660 /// These methods provide the core API for updating the call graph in the
661 /// presence of (potentially still in-flight) DFS-found RefSCCs and SCCs.
662 ///
663 /// Note that these methods sometimes have complex runtimes, so be careful
664 /// how you call them.
665
666 /// Make an existing internal ref edge into a call edge.
667 ///
668 /// This may form a larger cycle and thus collapse SCCs into TargetN's SCC.
669 /// If that happens, the optional callback \p MergedCB will be invoked (if
670 /// provided) on the SCCs being merged away prior to actually performing
671 /// the merge. Note that this will never include the target SCC as that
672 /// will be the SCC functions are merged into to resolve the cycle. Once
673 /// this function returns, these merged SCCs are not in a valid state but
674 /// the pointers will remain valid until destruction of the parent graph
675 /// instance for the purpose of clearing cached information. This function
676 /// also returns 'true' if a cycle was formed and some SCCs merged away as
677 /// a convenience.
678 ///
679 /// After this operation, both SourceN's SCC and TargetN's SCC may move
680 /// position within this RefSCC's postorder list. Any SCCs merged are
681 /// merged into the TargetN's SCC in order to preserve reachability analyses
682 /// which took place on that SCC.
683 bool switchInternalEdgeToCall(
684 Node &SourceN, Node &TargetN,
685 function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});
686
687 /// Make an existing internal call edge between separate SCCs into a ref
688 /// edge.
689 ///
690 /// If SourceN and TargetN are in separate SCCs within this RefSCC, changing
691 /// the call edge between them to a ref edge is a trivial operation that
692 /// does not require any structural changes to the call graph.
693 void switchTrivialInternalEdgeToRef(Node &SourceN, Node &TargetN);
694
695 /// Make an existing internal call edge within a single SCC into a ref
696 /// edge.
697 ///
698 /// Since SourceN and TargetN are part of a single SCC, this SCC may be
699 /// split up due to breaking a cycle in the call edges that formed it. If
700 /// that happens, then this routine will insert new SCCs into the postorder
701 /// list *before* the SCC of TargetN (previously the SCC of both). This
702 /// preserves postorder as the TargetN can reach all of the other nodes by
703 /// definition of previously being in a single SCC formed by the cycle from
704 /// SourceN to TargetN.
705 ///
706 /// The newly added SCCs are inserted *immediately* and contiguously
707 /// prior to the TargetN SCC, and the returned range covers the new SCCs in
708 /// the RefSCC's postorder sequence. You can directly iterate the returned
709 /// range to observe all of the new SCCs in postorder.
710 ///
711 /// Note that if SourceN and TargetN are in separate SCCs, the simpler
712 /// routine `switchTrivialInternalEdgeToRef` should be used instead.
713 iterator_range<iterator> switchInternalEdgeToRef(Node &SourceN,
714 Node &TargetN);
715
716 /// Make an existing outgoing ref edge into a call edge.
717 ///
718 /// Note that this is trivial as there are no cyclic impacts and there
719 /// remains a reference edge.
720 void switchOutgoingEdgeToCall(Node &SourceN, Node &TargetN);
721
722 /// Make an existing outgoing call edge into a ref edge.
723 ///
724 /// This is trivial as there are no cyclic impacts and there remains
725 /// a reference edge.
726 void switchOutgoingEdgeToRef(Node &SourceN, Node &TargetN);
727
728 /// Insert a ref edge from one node in this RefSCC to another in this
729 /// RefSCC.
730 ///
731 /// This is always a trivial operation as it doesn't change any part of the
732 /// graph structure besides connecting the two nodes.
733 ///
734 /// Note that we don't support directly inserting internal *call* edges
735 /// because that could change the graph structure and requires returning
736 /// information about what became invalid. As a consequence, the pattern
737 /// should be to first insert the necessary ref edge, and then to switch it
738 /// to a call edge if needed and handle any invalidation that results. See
739 /// the \c switchInternalEdgeToCall routine for details.
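///
/// A hedged sketch of that two-step pattern for introducing a new internal
/// call edge, where \c RC names this RefSCC and \c SourceN and \c TargetN
/// are assumed to already be members of it:
///
/// \code
///   RC.insertInternalRefEdge(SourceN, TargetN);
///   bool FormedCycle = RC.switchInternalEdgeToCall(SourceN, TargetN);
///   // If FormedCycle is true, some SCCs were merged into TargetN's SCC and
///   // any cached per-SCC state for the merged SCCs must be invalidated.
/// \endcode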
740 void insertInternalRefEdge(Node &SourceN, Node &TargetN);
741
742 /// Insert an edge whose parent is in this RefSCC and child is in some
743 /// child RefSCC.
744 ///
745 /// There must be an existing path from the \p SourceN to the \p TargetN.
746 /// This operation is inexpensive and does not change the set of SCCs and
747 /// RefSCCs in the graph.
748 void insertOutgoingEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
749
750 /// Insert an edge whose source is in a descendant RefSCC and target is in
751 /// this RefSCC.
752 ///
753 /// There must be an existing path from the target to the source in this
754 /// case.
755 ///
756 /// NB! This has the potential to be a very expensive function. It
757 /// inherently forms a cycle in the prior RefSCC DAG and we have to merge
758 /// RefSCCs to resolve that cycle. But finding all of the RefSCCs which
759 /// participate in the cycle can in the worst case require traversing every
760 /// RefSCC in the graph. Every attempt is made to avoid that, but passes
761 /// must still exercise caution calling this routine repeatedly.
762 ///
763 /// Also note that this can only insert ref edges. In order to insert
764 /// a call edge, first insert a ref edge and then switch it to a call edge.
765 /// These are intentionally kept as separate interfaces because each step
766 /// of the operation invalidates a different set of data structures.
767 ///
768 /// This returns all the RefSCCs which were merged into this RefSCC
769 /// (the target's). This allows callers to invalidate any cached
770 /// information.
771 ///
772 /// FIXME: We could possibly optimize this quite a bit for cases where the
773 /// caller and callee are very nearby in the graph. See comments in the
774 /// implementation for details, but that use case might impact users.
775 SmallVector<RefSCC *, 1> insertIncomingRefEdge(Node &SourceN,
776 Node &TargetN);
777
778 /// Remove an edge whose source is in this RefSCC and target is *not*.
779 ///
780 /// This removes an inter-RefSCC edge. All inter-RefSCC edges originating
781 /// from this SCC have been fully explored by any in-flight DFS graph
782 /// formation, so this is always safe to call once you have the source
783 /// RefSCC.
784 ///
785 /// This operation does not change the cyclic structure of the graph and so
786 /// is very inexpensive. It may change the connectivity graph of the SCCs
787 /// though, so be careful calling this while iterating over them.
788 void removeOutgoingEdge(Node &SourceN, Node &TargetN);
789
790 /// Remove a list of ref edges which are entirely within this RefSCC.
791 ///
792 /// Both the \a SourceN and all of the \a TargetNs must be within this
793 /// RefSCC. Removing these edges may break cycles that form this RefSCC and
794 /// thus this operation may change the RefSCC graph significantly. In
795 /// particular, this operation will re-form new RefSCCs based on the
796 /// remaining connectivity of the graph. The following invariants are
797 /// guaranteed to hold after calling this method:
798 ///
799 /// 1) If a ref-cycle remains after removal, it leaves this RefSCC intact
800 /// and in the graph. No new RefSCCs are built.
801 /// 2) Otherwise, this RefSCC will be dead after this call and no longer in
802 /// the graph or the postorder traversal of the call graph. Any iterator
803 /// pointing at this RefSCC will become invalid.
804 /// 3) All newly formed RefSCCs will be returned and the order of the
805 /// RefSCCs returned will be a valid postorder traversal of the new
806 /// RefSCCs.
807 /// 4) No RefSCC other than this RefSCC has its member set changed (this is
808 /// inherent in the definition of removing such an edge).
809 ///
810 /// These invariants are very important to ensure that we can build
811 /// optimization pipelines on top of the CGSCC pass manager which
812 /// intelligently update the RefSCC graph without invalidating other parts
813 /// of the RefSCC graph.
814 ///
815 /// Note that we provide no routine to remove a *call* edge. Instead, you
816 /// must first switch it to a ref edge using \c switchInternalEdgeToRef.
817 /// This split API is intentional as each of these two steps can invalidate
818 /// a different aspect of the graph structure and needs to have the
819 /// invalidation handled independently.
820 ///
821 /// The runtime complexity of this method is, in the worst case, O(V+E)
822 /// where V is the number of nodes in this RefSCC and E is the number of
823 /// edges leaving the nodes in this RefSCC. Note that E includes both edges
824 /// within this RefSCC and edges from this RefSCC to child RefSCCs. Some
825 /// effort has been made to minimize the overhead of common cases such as
826 /// self-edges and edge removals which result in a spanning tree with no
827 /// more cycles.
828 SmallVector<RefSCC *, 1> removeInternalRefEdge(Node &SourceN,
829 ArrayRef<Node *> TargetNs);
830
831 /// A convenience wrapper around the above to handle trivial cases of
832 /// inserting a new call edge.
833 ///
834 /// This is trivial whenever the target is in the same SCC as the source or
835 /// the edge is an outgoing edge to some descendant SCC. In these cases
836 /// there is no change to the cyclic structure of SCCs or RefSCCs.
837 ///
838 /// To further make calling this convenient, it also handles inserting
839 /// already existing edges.
840 void insertTrivialCallEdge(Node &SourceN, Node &TargetN);
841
842 /// A convenience wrapper around the above to handle trivial cases of
843 /// inserting a new ref edge.
844 ///
845 /// This is trivial whenever the target is in the same RefSCC as the source
846 /// or the edge is an outgoing edge to some descendant RefSCC. In these
847 /// cases there is no change to the cyclic structure of the RefSCCs.
848 ///
849 /// To further make calling this convenient, it also handles inserting
850 /// already existing edges.
851 void insertTrivialRefEdge(Node &SourceN, Node &TargetN);
852
853 /// Directly replace a node's function with a new function.
854 ///
855 /// This should be used when moving the body and users of a function to
856 /// a new formal function object but not otherwise changing the call graph
857 /// structure in any way.
858 ///
859 /// It requires that the old function in the provided node have zero uses
860 /// and the new function must have calls and references to it establishing
861 /// an equivalent graph.
862 void replaceNodeFunction(Node &N, Function &NewF);
863
864 ///@}
865 };
866
867 /// A post-order depth-first RefSCC iterator over the call graph.
868 ///
869 /// This iterator walks the cached post-order sequence of RefSCCs. However,
870 /// it trades stability for flexibility. It is restricted to a forward
871 /// iterator but will survive mutations which insert new RefSCCs and continue
872 /// to point to the same RefSCC even if it moves in the post-order sequence.
873 class postorder_ref_scc_iterator
874 : public iterator_facade_base<postorder_ref_scc_iterator,
875 std::forward_iterator_tag, RefSCC> {
876 friend class LazyCallGraph;
877 friend class LazyCallGraph::Node;
878
879 /// Nonce type to select the constructor for the end iterator.
880 struct IsAtEndT {};
881
882 LazyCallGraph *G;
883 RefSCC *RC = nullptr;
884
885 /// Build the begin iterator for a node.
886 postorder_ref_scc_iterator(LazyCallGraph &G) : G(&G), RC(getRC(G, 0)) {}
887
888 /// Build the end iterator for a node. This is selected purely by overload.
889 postorder_ref_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/) : G(&G) {}
890
891 /// Get the post-order RefSCC at the given index of the postorder walk,
892 /// populating it if necessary.
893 static RefSCC *getRC(LazyCallGraph &G, int Index) {
894 if (Index == (int)G.PostOrderRefSCCs.size())
895 // We're at the end.
896 return nullptr;
897
898 return G.PostOrderRefSCCs[Index];
899 }
900
901 public:
902 bool operator==(const postorder_ref_scc_iterator &Arg) const {
903 return G == Arg.G && RC == Arg.RC;
904 }
905
906 reference operator*() const { return *RC; }
907
908 using iterator_facade_base::operator++;
909 postorder_ref_scc_iterator &operator++() {
910 assert(RC && "Cannot increment the end iterator!");
911 RC = getRC(*G, G->RefSCCIndices.find(RC)->second + 1);
912 return *this;
913 }
914 };
915
916 /// Construct a graph for the given module.
917 ///
918 /// This sets up the graph and computes all of the entry points of the graph.
919 /// No function definitions are scanned until their nodes in the graph are
920 /// requested during traversal.
921 LazyCallGraph(Module &M,
922 function_ref<TargetLibraryInfo &(Function &)> GetTLI);
923
924 LazyCallGraph(LazyCallGraph &&G);
925 LazyCallGraph &operator=(LazyCallGraph &&RHS);
926
927 bool invalidate(Module &, const PreservedAnalyses &PA,
928 ModuleAnalysisManager::Invalidator &);
929
930 EdgeSequence::iterator begin() { return EntryEdges.begin(); }
931 EdgeSequence::iterator end() { return EntryEdges.end(); }
932
933 void buildRefSCCs();
934
935 postorder_ref_scc_iterator postorder_ref_scc_begin() {
936 if (!EntryEdges.empty())
937 assert(!PostOrderRefSCCs.empty() &&
938 "Must form RefSCCs before iterating them!");
939 return postorder_ref_scc_iterator(*this);
940 }
941 postorder_ref_scc_iterator postorder_ref_scc_end() {
942 if (!EntryEdges.empty())
943 assert(!PostOrderRefSCCs.empty() &&
944 "Must form RefSCCs before iterating them!");
945 return postorder_ref_scc_iterator(*this,
946 postorder_ref_scc_iterator::IsAtEndT());
947 }
948
949 iterator_range<postorder_ref_scc_iterator> postorder_ref_sccs() {
950 return make_range(postorder_ref_scc_begin(), postorder_ref_scc_end());
951 }
952
953 /// Lookup a function in the graph which has already been scanned and added.
954 Node *lookup(const Function &F) const { return NodeMap.lookup(&F); }
955
956 /// Lookup a function's SCC in the graph.
957 ///
958 /// \returns null if the function hasn't been assigned an SCC via the RefSCC
959 /// iterator walk.
960 SCC *lookupSCC(Node &N) const { return SCCMap.lookup(&N); }
961
962 /// Lookup a function's RefSCC in the graph.
963 ///
964 /// \returns null if the function hasn't been assigned a RefSCC via the
965 /// RefSCC iterator walk.
966 RefSCC *lookupRefSCC(Node &N) const {
967 if (SCC *C = lookupSCC(N))
968 return &C->getOuterRefSCC();
969
970 return nullptr;
971 }
972
973 /// Get a graph node for a given function, scanning it to populate the graph
974 /// data as necessary.
975 Node &get(Function &F) {
976 Node *&N = NodeMap[&F];
977 if (N)
978 return *N;
979
980 return insertInto(F, N);
981 }
982
983 /// Get the sequence of known and defined library functions.
984 ///
985 /// These functions, because they are known to LLVM, can have calls
986 /// introduced out of thin air from arbitrary IR.
987 ArrayRef<Function *> getLibFunctions() const {
988 return LibFunctions.getArrayRef();
989 }
990
991 /// Test whether a function is a known and defined library function tracked by
992 /// the call graph.
993 ///
994 /// Because these functions are known to LLVM they are specially modeled in
995 /// the call graph and even when all IR-level references have been removed
996 /// remain active and reachable.
997 bool isLibFunction(Function &F) const { return LibFunctions.count(&F); }
998
999 ///@{
1000 /// \name Pre-SCC Mutation API
1001 ///
1002 /// These methods are only valid to call prior to forming any SCCs for this
1003 /// call graph. They can be used to update the core node-graph during
1004 /// a node-based inorder traversal that precedes any SCC-based traversal.
1005 ///
1006 /// Once you begin manipulating a call graph's SCCs, most mutation of the
1007 /// graph must be performed via a RefSCC method. There are some exceptions
1008 /// below.
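///
/// A small, hedged sketch of this pre-SCC usage, assuming \c CG is the graph
/// and \c Source and \c Target are functions already in the module:
///
/// \code
///   CG.insertEdge(Source, Target, LazyCallGraph::Edge::Ref);
///   // Later, if the reference disappears again:
///   CG.removeEdge(Source, Target);
/// \endcode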
1009
1010 /// Update the call graph after inserting a new edge.
1011 void insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
1012
1013 /// Update the call graph after inserting a new edge.
1014 void insertEdge(Function &Source, Function &Target, Edge::Kind EK) {
1015 return insertEdge(get(Source), get(Target), EK);
1016 }
1017
1018 /// Update the call graph after deleting an edge.
1019 void removeEdge(Node &SourceN, Node &TargetN);
1020
1021 /// Update the call graph after deleting an edge.
1022 void removeEdge(Function &Source, Function &Target) {
1023 return removeEdge(get(Source), get(Target));
1024 }
1025
1026 ///@}
1027
1028 ///@{
1029 /// \name General Mutation API
1030 ///
1031 /// There are a very limited set of mutations allowed on the graph as a whole
1032 /// once SCCs have started to be formed. These routines have strict contracts
1033 /// but may be called at any point.
1034
1035 /// Remove a dead function from the call graph (typically to delete it).
1036 ///
1037 /// Note that the function must have an empty use list, and the call graph
1038 /// must be up-to-date prior to calling this. That means it is by itself in
1039 /// a maximal SCC which is by itself in a maximal RefSCC, etc. No structural
1040 /// changes result from calling this routine other than potentially removing
1041 /// entry points into the call graph.
1042 ///
1043 /// If SCC formation has begun, this function must not be part of the current
1044 /// DFS in order to call this safely. Typically, the function will have been
1045 /// fully visited by the DFS prior to calling this routine.
1046 void removeDeadFunction(Function &F);
1047
1048 /// Add a new function split/outlined from an existing function.
1049 ///
1050 /// The new function may only reference other functions that the original
1051 /// function did.
1052 ///
1053 /// The original function must reference (either directly or indirectly) the
1054 /// new function.
1055 ///
1056 /// The new function may also reference the original function.
1057 /// It may end up in a parent SCC in the case that the original function's
1058 /// edge to the new function is a ref edge, and the edge back is a call edge.
1059 void addSplitFunction(Function &OriginalFunction, Function &NewFunction);
1060
1061 /// Add new ref-recursive functions split/outlined from an existing function.
1062 ///
1063 /// The new functions may only reference other functions that the original
1064 /// function did. The new functions may reference (not call) the original
1065 /// function.
1066 ///
1067 /// The original function must reference (not call) all new functions.
1068 /// All new functions must reference (not call) each other.
1069 void addSplitRefRecursiveFunctions(Function &OriginalFunction,
1070 ArrayRef<Function *> NewFunctions);
1071
1072 ///@}
1073
1074 ///@{
1075 /// \name Static helpers for code doing updates to the call graph.
1076 ///
1077 /// These helpers are used to implement parts of the call graph but are also
1078 /// useful to code doing updates or otherwise wanting to walk the IR in the
1079 /// same patterns as when we build the call graph.
1080
1081 /// Recursively visits the defined functions whose address is reachable from
1082 /// every constant in the \p Worklist.
1083 ///
1084 /// Doesn't recurse through any constants already in the \p Visited set, and
1085 /// updates that set with every constant visited.
1086 ///
1087 /// For each defined function, calls \p Callback with that function.
1088 template <typename CallbackT>
1089 static void visitReferences(SmallVectorImpl<Constant *> &Worklist,
1090 SmallPtrSetImpl<Constant *> &Visited,
1091 CallbackT Callback) {
1092 while (!Worklist.empty()) {
1093 Constant *C = Worklist.pop_back_val();
1094
1095 if (Function *F = dyn_cast<Function>(C)) {
1096 if (!F->isDeclaration())
1097 Callback(*F);
1098 continue;
1099 }
1100
1101 // blockaddresses are weird and don't participate in the call graph anyway,
1102 // skip them.
1103 if (isa<BlockAddress>(C))
1104 continue;
1105
1106 for (Value *Op : C->operand_values())
1107 if (Visited.insert(cast<Constant>(Op)).second)
1108 Worklist.push_back(cast<Constant>(Op));
1109 }
1110 }
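
  // A hedged usage sketch for visitReferences: seed the worklist with the
  // constant operands of one function's instructions to discover every
  // defined function whose address escapes into that body. Here `F` and the
  // `OnFunction` callback are assumptions for illustration.
  //
  //   SmallVector<Constant *, 16> Worklist;
  //   SmallPtrSet<Constant *, 16> Visited;
  //   for (BasicBlock &BB : F)
  //     for (Instruction &I : BB)
  //       for (Value *Op : I.operand_values())
  //         if (auto *C = dyn_cast<Constant>(Op))
  //           if (Visited.insert(C).second)
  //             Worklist.push_back(C);
  //   LazyCallGraph::visitReferences(Worklist, Visited, OnFunction);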
1111
1112 ///@}
1113
1114 private:
1115 using node_stack_iterator = SmallVectorImpl<Node *>::reverse_iterator;
1116 using node_stack_range = iterator_range<node_stack_iterator>;
1117
1118 /// Allocator that holds all the call graph nodes.
1119 SpecificBumpPtrAllocator<Node> BPA;
1120
1121 /// Maps function->node for fast lookup.
1122 DenseMap<const Function *, Node *> NodeMap;
1123
1124 /// The entry edges into the graph.
1125 ///
1126 /// These edges are from "external" sources. Put another way, they
1127 /// escape at the module scope.
1128 EdgeSequence EntryEdges;
1129
1130 /// Allocator that holds all the call graph SCCs.
1131 SpecificBumpPtrAllocator<SCC> SCCBPA;
1132
1133 /// Maps Function -> SCC for fast lookup.
1134 DenseMap<Node *, SCC *> SCCMap;
1135
1136 /// Allocator that holds all the call graph RefSCCs.
1137 SpecificBumpPtrAllocator<RefSCC> RefSCCBPA;
1138
1139 /// The post-order sequence of RefSCCs.
1140 ///
1141 /// This list is lazily formed the first time we walk the graph.
1142 SmallVector<RefSCC *, 16> PostOrderRefSCCs;
1143
1144 /// A map from RefSCC to the index for it in the postorder sequence of
1145 /// RefSCCs.
1146 DenseMap<RefSCC *, int> RefSCCIndices;
1147
1148 /// Defined functions that are also known library functions which the
1149 /// optimizer can reason about and therefore might introduce calls to out of
1150 /// thin air.
1151 SmallSetVector<Function *, 4> LibFunctions;
1152
1153 /// Helper to insert a new function, with an already looked-up entry in
1154 /// the NodeMap.
1155 Node &insertInto(Function &F, Node *&MappedN);
1156
1157 /// Helper to initialize a new node created outside of creating SCCs and add
1158 /// it to the NodeMap if necessary. For example, useful when a function is
1159 /// split.
1160 Node &initNode(Function &F);
1161
1162 /// Helper to update pointers back to the graph object during moves.
1163 void updateGraphPtrs();
1164
1165 /// Allocates an SCC and constructs it using the graph allocator.
1166 ///
1167 /// The arguments are forwarded to the constructor.
1168 template <typename... Ts> SCC *createSCC(Ts &&... Args) {
1169 return new (SCCBPA.Allocate()) SCC(std::forward<Ts>(Args)...);
1170 }
1171
1172 /// Allocates a RefSCC and constructs it using the graph allocator.
1173 ///
1174 /// The arguments are forwarded to the constructor.
1175 template <typename... Ts> RefSCC *createRefSCC(Ts &&... Args) {
1176 return new (RefSCCBPA.Allocate()) RefSCC(std::forward<Ts>(Args)...);
1177 }
1178
1179 /// Common logic for building SCCs from a sequence of roots.
1180 ///
1181 /// This is a very generic implementation of the depth-first walk and SCC
1182 /// formation algorithm. It uses a generic sequence of roots and generic
1183 /// callbacks for each step. This is designed to be used to implement both
1184 /// the RefSCC formation and SCC formation with shared logic.
1185 ///
1186 /// Currently this is a relatively naive implementation of Tarjan's DFS
1187 /// algorithm to form the SCCs.
1188 ///
1189 /// FIXME: We should consider newer variants such as Nuutila.
1190 template <typename RootsT, typename GetBeginT, typename GetEndT,
1191 typename GetNodeT, typename FormSCCCallbackT>
1192 static void buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
1193 GetEndT &&GetEnd, GetNodeT &&GetNode,
1194 FormSCCCallbackT &&FormSCC);
1195
1196 /// Build the SCCs for a RefSCC out of a list of nodes.
1197 void buildSCCs(RefSCC &RC, node_stack_range Nodes);
1198
1199 /// Get the index of a RefSCC within the postorder traversal.
1200 ///
1201 /// Requires that this RefSCC is a valid one in the (perhaps partial)
1202 /// postorder traversed part of the graph.
1203 int getRefSCCIndex(RefSCC &RC) {
1204 auto IndexIt = RefSCCIndices.find(&RC);
1205 assert(IndexIt != RefSCCIndices.end() && "RefSCC doesn't have an index!");
1206 assert(PostOrderRefSCCs[IndexIt->second] == &RC &&
1207 "Index does not point back at RC!");
1208 return IndexIt->second;
1209 }
1210 };
1211
1212 inline LazyCallGraph::Edge::Edge() : Value() {}
1213 inline LazyCallGraph::Edge::Edge(Node &N, Kind K) : Value(&N, K) {}
1214
1215 inline LazyCallGraph::Edge::operator bool() const {
1216 return Value.getPointer() && !Value.getPointer()->isDead();
1217 }
1218
1219 inline LazyCallGraph::Edge::Kind LazyCallGraph::Edge::getKind() const {
1220 assert(*this && "Queried a null edge!");
1221 return Value.getInt();
1222 }
1223
1224 inline bool LazyCallGraph::Edge::isCall() const {
1225 assert(*this && "Queried a null edge!");
1226 return getKind() == Call;
1227 }
1228
1229 inline LazyCallGraph::Node &LazyCallGraph::Edge::getNode() const {
1230 assert(*this && "Queried a null edge!");
1231 return *Value.getPointer();
1232 }
1233
1234 inline Function &LazyCallGraph::Edge::getFunction() const {
1235 assert(*this && "Queried a null edge!");
1236 return getNode().getFunction();
1237 }
1238
1239 // Provide GraphTraits specializations for call graphs.
1240 template <> struct GraphTraits<LazyCallGraph::Node *> {
1241 using NodeRef = LazyCallGraph::Node *;
1242 using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;
1243
1244 static NodeRef getEntryNode(NodeRef N) { return N; }
1245 static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
1246 static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
1247 };
1248 template <> struct GraphTraits<LazyCallGraph *> {
1249 using NodeRef = LazyCallGraph::Node *;
1250 using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;
1251
1252 static NodeRef getEntryNode(NodeRef N) { return N; }
1253 static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
1254 static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
1255 };
1256
1257 /// An analysis pass which computes the call graph for a module.
1258 class LazyCallGraphAnalysis : public AnalysisInfoMixin<LazyCallGraphAnalysis> {
1259 friend AnalysisInfoMixin<LazyCallGraphAnalysis>;
1260
1261 static AnalysisKey Key;
1262
1263 public:
1264 /// Inform generic clients of the result type.
1265 using Result = LazyCallGraph;
1266
1267 /// Compute the \c LazyCallGraph for the module \c M.
1268 ///
1269 /// This just builds the set of entry points to the call graph. The rest is
1270 /// built lazily as it is walked.
1271 LazyCallGraph run(Module &M, ModuleAnalysisManager &AM) {
1272 FunctionAnalysisManager &FAM =
1273 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
1274 auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
1275 return FAM.getResult<TargetLibraryAnalysis>(F);
1276 };
1277 return LazyCallGraph(M, GetTLI);
1278 }
1279 };
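
// A hedged sketch of consuming this analysis from a module pass under the new
// pass manager (`MyPass` is a hypothetical pass):
//
//   PreservedAnalyses MyPass::run(Module &M, ModuleAnalysisManager &AM) {
//     LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);
//     CG.buildRefSCCs();
//     for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
//       (void)RC; // Inspect or transform each RefSCC here.
//     return PreservedAnalyses::all();
//   }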
1280
1281 /// A pass which prints the call graph to a \c raw_ostream.
1282 ///
1283 /// This is primarily useful for testing the analysis.
1284 class LazyCallGraphPrinterPass
1285 : public PassInfoMixin<LazyCallGraphPrinterPass> {
1286 raw_ostream &OS;
1287
1288 public:
1289 explicit LazyCallGraphPrinterPass(raw_ostream &OS);
1290
1291 PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
1292 };
1293
1294 /// A pass which prints the call graph as a DOT file to a \c raw_ostream.
1295 ///
1296 /// This is primarily useful for visualization purposes.
1297 class LazyCallGraphDOTPrinterPass
1298 : public PassInfoMixin<LazyCallGraphDOTPrinterPass> {
1299 raw_ostream &OS;
1300
1301 public:
1302 explicit LazyCallGraphDOTPrinterPass(raw_ostream &OS);
1303
1304 PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
1305 };
1306
1307 } // end namespace llvm
1308
1309 #endif // LLVM_ANALYSIS_LAZYCALLGRAPH_H
1310