//===- LazyCallGraph.h - Analysis of a Module's call graph ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Implements a lazy call graph analysis and related passes for the new pass
/// manager.
///
/// NB: This is *not* a traditional call graph! It is a graph which models both
/// the current calls and potential calls. As a consequence there are many
/// edges in this call graph that do not correspond to a 'call' or 'invoke'
/// instruction.
///
/// The primary use case of this graph analysis is to facilitate iterating
/// across the functions of a module in ways that ensure all callees are
/// visited prior to a caller (given any SCC constraints), or vice versa. As
/// such it is particularly well suited to organizing CGSCC optimizations such
/// as inlining, outlining, argument promotion, etc. That is its primary use
/// case and motivates the design. It may not be appropriate for other
/// purposes. The use graph of functions or some other conservative analysis of
/// call instructions may be interesting for optimizations and subsequent
/// analyses which don't work in the context of an overly specified
/// potential-call-edge graph.
///
/// To understand the specific rules and nature of this call graph analysis,
/// see the documentation of the \c LazyCallGraph below.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LAZYCALLGRAPH_H
#define LLVM_ANALYSIS_LAZYCALLGRAPH_H

#include "llvm/ADT/Any.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <optional>
#include <string>
#include <utility>

namespace llvm {

class Constant;
template <class GraphType> struct GraphTraits;
class Module;

/// A lazily constructed view of the call graph of a module.
///
/// With the edges of this graph, the motivating constraint that we are
/// attempting to maintain is that function-local optimization, CGSCC-local
/// optimizations, and optimizations transforming a pair of functions connected
/// by an edge in the graph, do not invalidate a bottom-up traversal of the SCC
/// DAG. That is, no optimizations will delete, remove, or add an edge such
/// that functions already visited in a bottom-up order of the SCC DAG are no
/// longer valid to have visited, or such that functions not yet visited in
/// a bottom-up order of the SCC DAG are not required to have already been
/// visited.
///
/// Within this constraint, the desire is to minimize the merge points of the
/// SCC DAG. The greater the fanout of the SCC DAG and the fewer merge points
/// in the SCC DAG, the more independence there is in optimizing within it.
/// There is a strong desire to enable parallelization of optimizations over
/// the call graph, and both limited fanout and merge points will (artificially
/// in some cases) limit the scaling of such an effort.
///
/// To this end, the graph represents both direct and any potential resolution
/// to an indirect call edge. Another way to think about it is that it
/// represents both the direct call edges and any direct call edges that might
/// be formed through static optimizations. Specifically, it considers taking
/// the address of a function to be an edge in the call graph because this
/// might be forwarded to become a direct call by some subsequent
/// function-local optimization. The result is that the graph closely follows
/// the use-def edges for functions. Walking "up" the graph can be done by
/// looking at all of the uses of a function.
///
/// The roots of the call graph are the external functions and functions
/// escaped into global variables. Those functions can be called from outside
/// of the module or via unknowable means in the IR -- we may not be able to
/// form even a potential call edge from a function body which may dynamically
/// load the function and call it.
///
/// This analysis still requires updates to remain valid after optimizations
/// which could potentially change the set of potential callees. The
/// constraints it operates under only make the traversal order remain valid.
///
/// The entire analysis must be re-computed if full interprocedural
/// optimizations run at any point. For example, globalopt completely
/// invalidates the information in this analysis.
///
/// FIXME: This class is named LazyCallGraph in a lame attempt to distinguish
/// it from the existing CallGraph. At some point, it is expected that this
/// will be the only call graph and it will be renamed accordingly.
class LazyCallGraph {
public:
  class Node;
  class EdgeSequence;
  class RefSCC;

  /// A class used to represent edges in the call graph.
  ///
  /// The lazy call graph models both *call* edges and *reference* edges. Call
  /// edges are much what you would expect, and exist when there is a 'call' or
  /// 'invoke' instruction of some function. Reference edges are also tracked
  /// alongside these, and exist whenever any instruction (transitively
  /// through its operands) references a function. All call edges are
  /// inherently reference edges, and so the reference graph forms a superset
  /// of the formal call graph.
  ///
  /// All of these forms of edges are fundamentally represented as outgoing
  /// edges. The edges are stored in the source node and point at the target
  /// node. This allows the edge structure itself to be a very compact data
  /// structure: essentially a tagged pointer.
  class Edge {
  public:
    /// The kind of edge in the graph.
    enum Kind : bool { Ref = false, Call = true };

    Edge();
    explicit Edge(Node &N, Kind K);

    /// Test whether the edge is null.
    ///
    /// This happens when an edge has been deleted. We leave the edge objects
    /// around but clear them.
    explicit operator bool() const;

    /// Returns the \c Kind of the edge.
    Kind getKind() const;

    /// Test whether the edge represents a direct call to a function.
    ///
    /// This requires that the edge is not null.
    bool isCall() const;

    /// Get the call graph node referenced by this edge.
    ///
    /// This requires that the edge is not null.
    Node &getNode() const;

    /// Get the function referenced by this edge.
    ///
    /// This requires that the edge is not null.
    Function &getFunction() const;

  private:
    friend class LazyCallGraph::EdgeSequence;
    friend class LazyCallGraph::RefSCC;

    PointerIntPair<Node *, 1, Kind> Value;

    void setKind(Kind K) { Value.setInt(K); }
  };

  /// The edge sequence object.
  ///
  /// This typically exists entirely within the node but is exposed as
  /// a separate type because a node doesn't initially have edges. An explicit
  /// population step is required to produce this sequence at first and it is
  /// then cached in the node. It is also used to represent edges entering the
  /// graph from outside the module to model the graph's roots.
  ///
  /// The sequence itself is both iterable and indexable. The indexes remain
  /// stable even as the sequence mutates (including removal).
  class EdgeSequence {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;
    friend class LazyCallGraph::RefSCC;

    using VectorT = SmallVector<Edge, 4>;
    using VectorImplT = SmallVectorImpl<Edge>;

  public:
    /// An iterator used for the edges to both entry nodes and child nodes.
    class iterator
        : public iterator_adaptor_base<iterator, VectorImplT::iterator,
                                       std::forward_iterator_tag> {
      friend class LazyCallGraph;
      friend class LazyCallGraph::Node;

      VectorImplT::iterator E;

      // Build the iterator for a specific position in the edge list.
      iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
          : iterator_adaptor_base(BaseI), E(E) {
        while (I != E && !*I)
          ++I;
      }

    public:
      iterator() = default;

      using iterator_adaptor_base::operator++;
      iterator &operator++() {
        do {
          ++I;
        } while (I != E && !*I);
        return *this;
      }
    };

    /// An iterator over specifically call edges.
    ///
    /// This has the same iteration properties as the \c iterator, but
    /// restricts itself to edges which represent actual calls.
    class call_iterator
        : public iterator_adaptor_base<call_iterator, VectorImplT::iterator,
                                       std::forward_iterator_tag> {
      friend class LazyCallGraph;
      friend class LazyCallGraph::Node;

      VectorImplT::iterator E;

      /// Advance the iterator to the next valid call edge.
      void advanceToNextEdge() {
        while (I != E && (!*I || !I->isCall()))
          ++I;
      }

      // Build the iterator for a specific position in the edge list.
      call_iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
          : iterator_adaptor_base(BaseI), E(E) {
        advanceToNextEdge();
      }

    public:
      call_iterator() = default;

      using iterator_adaptor_base::operator++;
      call_iterator &operator++() {
        ++I;
        advanceToNextEdge();
        return *this;
      }
    };

    iterator begin() { return iterator(Edges.begin(), Edges.end()); }
    iterator end() { return iterator(Edges.end(), Edges.end()); }

    Edge &operator[](Node &N) {
      assert(EdgeIndexMap.contains(&N) && "No such edge!");
      auto &E = Edges[EdgeIndexMap.find(&N)->second];
      assert(E && "Dead or null edge!");
      return E;
    }

    Edge *lookup(Node &N) {
      auto EI = EdgeIndexMap.find(&N);
      if (EI == EdgeIndexMap.end())
        return nullptr;
      auto &E = Edges[EI->second];
      return E ? &E : nullptr;
    }

    call_iterator call_begin() {
      return call_iterator(Edges.begin(), Edges.end());
    }
    call_iterator call_end() { return call_iterator(Edges.end(), Edges.end()); }

    iterator_range<call_iterator> calls() {
      return make_range(call_begin(), call_end());
    }

    bool empty() {
      for (auto &E : Edges)
        if (E)
          return false;

      return true;
    }

  private:
    VectorT Edges;
    DenseMap<Node *, int> EdgeIndexMap;

    EdgeSequence() = default;

    /// Internal helper to insert an edge to a node.
    void insertEdgeInternal(Node &ChildN, Edge::Kind EK);

    /// Internal helper to change an edge kind.
    void setEdgeKind(Node &ChildN, Edge::Kind EK);

    /// Internal helper to remove the edge to the given function.
    bool removeEdgeInternal(Node &ChildN);
  };
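
  // Illustrative sketch only (not part of the API): given a populated
  // EdgeSequence `Edges`, call edges can be walked directly and a specific
  // edge can be looked up by target node without asserting. `TargetN` and
  // `visit` are hypothetical.
  //
  //   for (LazyCallGraph::Edge &E : Edges.calls())
  //     visit(E.getFunction());
  //   if (LazyCallGraph::Edge *E = Edges.lookup(TargetN))
  //     (void)E->isCall();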

  /// A node in the call graph.
  ///
  /// This represents a single node. Its primary roles are to cache the list of
  /// callees, de-duplicate and provide fast testing of whether a function is a
  /// callee, and facilitate iteration of child nodes in the graph.
  ///
  /// The node works much like an optional in order to lazily populate the
  /// edges of each node. Until populated, there are no edges. Once populated,
  /// you can access the edges by dereferencing the node or using the `->`
  /// operator as if the node were a `std::optional<EdgeSequence>`.
  class Node {
    friend class LazyCallGraph;
    friend class LazyCallGraph::RefSCC;

  public:
    LazyCallGraph &getGraph() const { return *G; }

    Function &getFunction() const { return *F; }

    StringRef getName() const { return F->getName(); }

    /// Equality is defined as address equality.
    bool operator==(const Node &N) const { return this == &N; }
    bool operator!=(const Node &N) const { return !operator==(N); }

    /// Tests whether the node has been populated with edges.
    bool isPopulated() const { return Edges.has_value(); }

    /// Tests whether this is actually a dead node and no longer valid.
    ///
    /// Users rarely interact with nodes in this state and other methods are
    /// invalid. This is used to model a node in an edge list where the
    /// function has been completely removed.
    bool isDead() const {
      assert(!G == !F &&
             "Both graph and function pointers should be null or non-null.");
      return !G;
    }

    // We allow accessing the edges by dereferencing or using the arrow
    // operator, essentially wrapping the internal optional.
    EdgeSequence &operator*() const {
      // Rip const off because the node itself isn't changing here.
      return const_cast<EdgeSequence &>(*Edges);
    }
    EdgeSequence *operator->() const { return &**this; }

    /// Populate the edges of this node if necessary.
    ///
    /// The first time this is called it will populate the edges for this node
    /// in the graph. It does this by scanning the underlying function, so once
    /// this is done, any changes to that function must be explicitly reflected
    /// in updates to the graph.
    ///
    /// \returns the populated \c EdgeSequence to simplify walking it.
    ///
    /// This will not update or re-scan anything if called repeatedly. Instead,
    /// the edge sequence is cached and returned immediately on subsequent
    /// calls.
    EdgeSequence &populate() {
      if (Edges)
        return *Edges;

      return populateSlow();
    }

  private:
    LazyCallGraph *G;
    Function *F;

    // We provide for the DFS numbering and Tarjan walk lowlink numbers to be
    // stored directly within the node. These are both '-1' when nodes are part
    // of an SCC (or RefSCC), or '0' when not yet reached in a DFS walk.
    int DFSNumber = 0;
    int LowLink = 0;

    std::optional<EdgeSequence> Edges;

    /// Basic constructor; it only stashes the graph and function. The edges
    /// are populated lazily when \c populate() calls \c populateSlow().
    Node(LazyCallGraph &G, Function &F) : G(&G), F(&F) {}

    /// Implementation of the scan when populating.
    EdgeSequence &populateSlow();

    /// Internal helper to directly replace the function with a new one.
    ///
    /// This is used to facilitate transformations which need to replace the
    /// formal Function object but directly move the body and users from one to
    /// the other.
    void replaceFunction(Function &NewF);

    void clear() { Edges.reset(); }

    /// Print the name of this node's function.
    friend raw_ostream &operator<<(raw_ostream &OS, const Node &N) {
      return OS << N.F->getName();
    }

    /// Dump the name of this node's function to stderr.
    void dump() const;
  };
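
  // Illustrative sketch only: typical lazy access to a function's edges.
  // `CG` is assumed to be a LazyCallGraph and `F` a Function in its module;
  // `examine` is a hypothetical helper.
  //
  //   LazyCallGraph::Node &N = CG.get(F);
  //   LazyCallGraph::EdgeSequence &Edges = N.populate();
  //   for (LazyCallGraph::Edge &E : Edges)
  //     if (E.isCall())
  //       examine(E.getNode());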

  /// An SCC of the call graph.
  ///
  /// This represents a Strongly Connected Component of the direct call graph
  /// -- ignoring indirect calls and function references. It stores this as
  /// a collection of call graph nodes. While the order of nodes in the SCC is
  /// stable, it is not any particular order.
  ///
  /// The SCCs are nested within a \c RefSCC; see below for details about that
  /// outer structure. SCCs do not support mutation of the call graph; that
  /// must be done through the containing \c RefSCC in order to fully reason
  /// about the ordering and connections of the graph.
  class LLVM_ABI SCC {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;

    RefSCC *OuterRefSCC;
    SmallVector<Node *, 1> Nodes;

    template <typename NodeRangeT>
    SCC(RefSCC &OuterRefSCC, NodeRangeT &&Nodes)
        : OuterRefSCC(&OuterRefSCC), Nodes(std::forward<NodeRangeT>(Nodes)) {}

    void clear() {
      OuterRefSCC = nullptr;
      Nodes.clear();
    }

    /// Print a short description useful for debugging or logging.
    ///
    /// We print the function names in the SCC wrapped in '()'s and skipping
    /// the middle functions if there are a large number.
    //
    // Note: this is defined inline to dodge issues with GCC's interpretation
    // of enclosing namespaces for friend function declarations.
    friend raw_ostream &operator<<(raw_ostream &OS, const SCC &C) {
      OS << '(';
      int I = 0;
      for (LazyCallGraph::Node &N : C) {
        if (I > 0)
          OS << ", ";
        // Elide the inner elements if there are too many.
        if (I > 8) {
          OS << "..., " << *C.Nodes.back();
          break;
        }
        OS << N;
        ++I;
      }
      OS << ')';
      return OS;
    }

    /// Dump a short description of this SCC to stderr.
    void dump() const;

#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
    /// Verify invariants about the SCC.
    ///
    /// This will attempt to validate all of the basic invariants within an
    /// SCC, but not that it is a strongly connected component per se.
    /// Primarily useful while building and updating the graph to check that
    /// basic properties are in place rather than having inexplicable crashes
    /// later.
    void verify();
#endif

  public:
    using iterator = pointee_iterator<SmallVectorImpl<Node *>::const_iterator>;

    iterator begin() const { return Nodes.begin(); }
    iterator end() const { return Nodes.end(); }

    int size() const { return Nodes.size(); }

    RefSCC &getOuterRefSCC() const { return *OuterRefSCC; }

    /// Test if this SCC is a parent of \a C.
    ///
    /// Note that this is linear in the number of edges departing the current
    /// SCC.
    bool isParentOf(const SCC &C) const;

    /// Test if this SCC is an ancestor of \a C.
    ///
    /// Note that in the worst case this is linear in the number of edges
    /// departing the current SCC and every SCC in the entire graph reachable
    /// from this SCC. Thus this very well may walk every edge in the entire
    /// call graph! Do not call this in a tight loop!
    bool isAncestorOf(const SCC &C) const;

    /// Test if this SCC is a child of \a C.
    ///
    /// See the comments for \c isParentOf for detailed notes about the
    /// complexity of this routine.
    bool isChildOf(const SCC &C) const { return C.isParentOf(*this); }

    /// Test if this SCC is a descendant of \a C.
    ///
    /// See the comments for \c isAncestorOf for detailed notes about the
    /// complexity of this routine.
    bool isDescendantOf(const SCC &C) const { return C.isAncestorOf(*this); }

    /// Provide a short name by printing this SCC to a std::string.
    ///
    /// This copes with the fact that we don't have a name per se for an SCC
    /// while still making the use of this in debugging and logging useful.
    std::string getName() const {
      std::string Name;
      raw_string_ostream OS(Name);
      OS << *this;
      OS.flush();
      return Name;
    }
  };
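
  // Illustrative sketch only: walking the functions of an SCC `C` obtained
  // from the graph (e.g. via lookupSCC below); `process` is hypothetical.
  //
  //   for (LazyCallGraph::Node &N : C)
  //     process(N.getFunction());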

  /// A RefSCC of the call graph.
  ///
  /// This models a Strongly Connected Component of function reference edges in
  /// the call graph. As opposed to actual SCCs, these can be used to scope
  /// subgraphs of the module which are independent from other subgraphs of the
  /// module because they do not reference them in any way. This is also the
  /// unit where we do mutation of the graph in order to restrict mutations to
  /// those which don't violate this independence.
  ///
  /// A RefSCC contains a DAG of actual SCCs. All the nodes within the RefSCC
  /// are necessarily within some actual SCC that nests within it. Since
  /// a direct call *is* a reference, there will always be at least one RefSCC
  /// around any SCC.
  ///
  /// Spurious ref edges, meaning ref edges that still exist in the call graph
  /// even though the corresponding IR reference no longer exists, are allowed.
  /// This is mostly to support argument promotion, which can modify a caller
  /// to no longer pass a function. The only place that needs to specially
  /// handle this is deleting a dead function/node; otherwise, the dead ref
  /// edges are automatically removed when visiting the function/node that no
  /// longer contains the ref edge.
  class RefSCC {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;

    LazyCallGraph *G;

    /// A postorder list of the inner SCCs.
    SmallVector<SCC *, 4> SCCs;

    /// A map from SCC to index in the postorder list.
    SmallDenseMap<SCC *, int, 4> SCCIndices;

    /// Fast-path constructor. RefSCCs should instead be constructed by calling
    /// formRefSCCFast on the graph itself.
    RefSCC(LazyCallGraph &G);

    void clear() {
      SCCs.clear();
      SCCIndices.clear();
    }

    /// Print a short description useful for debugging or logging.
    ///
    /// We print the SCCs wrapped in '[]'s and skipping the middle SCCs if
    /// there are a large number.
    //
    // Note: this is defined inline to dodge issues with GCC's interpretation
    // of enclosing namespaces for friend function declarations.
    friend raw_ostream &operator<<(raw_ostream &OS, const RefSCC &RC) {
      OS << '[';
      int I = 0;
      for (LazyCallGraph::SCC &C : RC) {
        if (I > 0)
          OS << ", ";
        // Elide the inner elements if there are too many.
        if (I > 4) {
          OS << "..., " << *RC.SCCs.back();
          break;
        }
        OS << C;
        ++I;
      }
      OS << ']';
      return OS;
    }

    /// Dump a short description of this RefSCC to stderr.
    void dump() const;

#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
    /// Verify invariants about the RefSCC and all its SCCs.
    ///
    /// This will attempt to validate all of the invariants *within* the
    /// RefSCC, but not that it is a strongly connected component of the larger
    /// graph. This makes it useful even when partially through an update.
    ///
    /// Invariants checked:
    /// - SCCs and their indices match.
    /// - The SCCs list is in fact in post-order.
    void verify();
#endif

  public:
    using iterator = pointee_iterator<SmallVectorImpl<SCC *>::const_iterator>;
    using range = iterator_range<iterator>;
    using parent_iterator =
        pointee_iterator<SmallPtrSetImpl<RefSCC *>::const_iterator>;

    iterator begin() const { return SCCs.begin(); }
    iterator end() const { return SCCs.end(); }

    ssize_t size() const { return SCCs.size(); }

    SCC &operator[](int Idx) { return *SCCs[Idx]; }

    iterator find(SCC &C) const {
      return SCCs.begin() + SCCIndices.find(&C)->second;
    }

    /// Test if this RefSCC is a parent of \a RC.
    ///
    /// CAUTION: This method walks every edge in the \c RefSCC; it can be very
    /// expensive.
    bool isParentOf(const RefSCC &RC) const;

    /// Test if this RefSCC is an ancestor of \a RC.
    ///
    /// CAUTION: This method walks the directed graph of edges as far as
    /// necessary to find a possible path to the argument. In the worst case
    /// this may walk the entire graph and can be extremely expensive.
    bool isAncestorOf(const RefSCC &RC) const;

    /// Test if this RefSCC is a child of \a RC.
    ///
    /// CAUTION: This method walks every edge in the argument \c RefSCC; it can
    /// be very expensive.
    bool isChildOf(const RefSCC &RC) const { return RC.isParentOf(*this); }

    /// Test if this RefSCC is a descendant of \a RC.
    ///
    /// CAUTION: This method walks the directed graph of edges as far as
    /// necessary to find a possible path from the argument. In the worst case
    /// this may walk the entire graph and can be extremely expensive.
    bool isDescendantOf(const RefSCC &RC) const {
      return RC.isAncestorOf(*this);
    }

    /// Provide a short name by printing this RefSCC to a std::string.
    ///
    /// This copes with the fact that we don't have a name per se for a RefSCC
    /// while still making the use of this in debugging and logging useful.
    std::string getName() const {
      std::string Name;
      raw_string_ostream OS(Name);
      OS << *this;
      OS.flush();
      return Name;
    }

    ///@{
    /// \name Mutation API
    ///
    /// These methods provide the core API for updating the call graph in the
    /// presence of (potentially still in-flight) DFS-found RefSCCs and SCCs.
    ///
    /// Note that these methods sometimes have complex runtimes, so be careful
    /// how you call them.

    /// Make an existing internal ref edge into a call edge.
    ///
    /// This may form a larger cycle and thus collapse SCCs into TargetN's SCC.
    /// If that happens, the optional callback \p MergeCB will be invoked (if
    /// provided) on the SCCs being merged away prior to actually performing
    /// the merge. Note that this will never include the target SCC as that
    /// will be the SCC functions are merged into to resolve the cycle. Once
    /// this function returns, these merged SCCs are not in a valid state but
    /// the pointers will remain valid until destruction of the parent graph
    /// instance for the purpose of clearing cached information. As
    /// a convenience, this function also returns 'true' if a cycle was formed
    /// and some SCCs were merged away.
    ///
    /// After this operation, both SourceN's SCC and TargetN's SCC may move
    /// position within this RefSCC's postorder list. Any SCCs merged are
    /// merged into the TargetN's SCC in order to preserve reachability
    /// analyses which took place on that SCC.
    bool switchInternalEdgeToCall(
        Node &SourceN, Node &TargetN,
        function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});

    /// Make an existing internal call edge between separate SCCs into a ref
    /// edge.
    ///
    /// If SourceN and TargetN are in separate SCCs within this RefSCC,
    /// changing the call edge between them to a ref edge is a trivial
    /// operation that does not require any structural changes to the call
    /// graph.
    void switchTrivialInternalEdgeToRef(Node &SourceN, Node &TargetN);

    /// Make an existing internal call edge within a single SCC into a ref
    /// edge.
    ///
    /// Since SourceN and TargetN are part of a single SCC, this SCC may be
    /// split up due to breaking a cycle in the call edges that formed it. If
    /// that happens, then this routine will insert new SCCs into the postorder
    /// list *before* the SCC of TargetN (previously the SCC of both). This
    /// preserves postorder as the TargetN can reach all of the other nodes by
    /// definition of previously being in a single SCC formed by the cycle from
    /// SourceN to TargetN.
    ///
    /// The newly added SCCs are added *immediately* and contiguously prior to
    /// the TargetN SCC, and the returned range covers the new SCCs in the
    /// RefSCC's postorder sequence. You can directly iterate the returned
    /// range to observe all of the new SCCs in postorder.
    ///
    /// Note that if SourceN and TargetN are in separate SCCs, the simpler
    /// routine `switchTrivialInternalEdgeToRef` should be used instead.
    iterator_range<iterator> switchInternalEdgeToRef(Node &SourceN,
                                                     Node &TargetN);

    /// Make an existing outgoing ref edge into a call edge.
    ///
    /// Note that this is trivial as there are no cyclic impacts and there
    /// remains a reference edge.
    void switchOutgoingEdgeToCall(Node &SourceN, Node &TargetN);

    /// Make an existing outgoing call edge into a ref edge.
    ///
    /// This is trivial as there are no cyclic impacts and there remains
    /// a reference edge.
    void switchOutgoingEdgeToRef(Node &SourceN, Node &TargetN);

    /// Insert a ref edge from one node in this RefSCC to another in this
    /// RefSCC.
    ///
    /// This is always a trivial operation as it doesn't change any part of the
    /// graph structure besides connecting the two nodes.
    ///
    /// Note that we don't support directly inserting internal *call* edges
    /// because that could change the graph structure and requires returning
    /// information about what became invalid. As a consequence, the pattern
    /// should be to first insert the necessary ref edge, and then to switch it
    /// to a call edge if needed and handle any invalidation that results. See
    /// the \c switchInternalEdgeToCall routine for details.
    void insertInternalRefEdge(Node &SourceN, Node &TargetN);
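
    // Illustrative sketch of the pattern described above. `RC` is this
    // RefSCC; `SourceN` and `TargetN` are hypothetical nodes within it.
    //
    //   RC.insertInternalRefEdge(SourceN, TargetN);
    //   bool FormedCycle = RC.switchInternalEdgeToCall(SourceN, TargetN);
    //   // FormedCycle is true if SCCs were merged into TargetN's SCC.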

    /// Insert an edge whose source is in this RefSCC and whose target is in
    /// some child RefSCC.
    ///
    /// There must be an existing path from the \p SourceN to the \p TargetN.
    /// This operation is inexpensive and does not change the set of SCCs and
    /// RefSCCs in the graph.
    void insertOutgoingEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);

    /// Insert an edge whose source is in a descendant RefSCC and target is in
    /// this RefSCC.
    ///
    /// There must be an existing path from the target to the source in this
    /// case.
    ///
    /// NB! This has the potential to be a very expensive function. It
    /// inherently forms a cycle in the prior RefSCC DAG and we have to merge
    /// RefSCCs to resolve that cycle. But finding all of the RefSCCs which
    /// participate in the cycle can in the worst case require traversing every
    /// RefSCC in the graph. Every attempt is made to avoid that, but passes
    /// must still exercise caution calling this routine repeatedly.
    ///
    /// Also note that this can only insert ref edges. In order to insert
    /// a call edge, first insert a ref edge and then switch it to a call edge.
    /// These are intentionally kept as separate interfaces because each step
    /// of the operation invalidates a different set of data structures.
    ///
    /// This returns all the RefSCCs which were merged into this RefSCC (the
    /// target's). This allows callers to invalidate any cached information.
    ///
    /// FIXME: We could possibly optimize this quite a bit for cases where the
    /// caller and callee are very nearby in the graph. See comments in the
    /// implementation for details, but that use case might impact users.
    SmallVector<RefSCC *, 1> insertIncomingRefEdge(Node &SourceN,
                                                   Node &TargetN);

    /// Remove an edge whose source is in this RefSCC and target is *not*.
    ///
    /// This removes an inter-RefSCC edge. All inter-RefSCC edges originating
    /// from this RefSCC have been fully explored by any in-flight DFS graph
    /// formation, so this is always safe to call once you have the source
    /// RefSCC.
    ///
    /// This operation does not change the cyclic structure of the graph and so
    /// is very inexpensive. It may change the connectivity graph of the SCCs
    /// though, so be careful calling this while iterating over them.
    void removeOutgoingEdge(Node &SourceN, Node &TargetN);

    /// Remove a list of ref edges which are entirely within this RefSCC.
    ///
    /// Both the \a SourceN and all of the \a TargetNs must be within this
    /// RefSCC. Removing these edges may break cycles that form this RefSCC and
    /// thus this operation may change the RefSCC graph significantly. In
    /// particular, this operation will re-form new RefSCCs based on the
    /// remaining connectivity of the graph. The following invariants are
    /// guaranteed to hold after calling this method:
    ///
    /// 1) If a ref-cycle remains after removal, it leaves this RefSCC intact
    ///    and in the graph. No new RefSCCs are built.
    /// 2) Otherwise, this RefSCC will be dead after this call and no longer in
    ///    the graph or the postorder traversal of the call graph. Any iterator
    ///    pointing at this RefSCC will become invalid.
    /// 3) All newly formed RefSCCs will be returned and the order of the
    ///    RefSCCs returned will be a valid postorder traversal of the new
    ///    RefSCCs.
    /// 4) No RefSCC other than this RefSCC has its member set changed (this is
    ///    inherent in the definition of removing such an edge).
    ///
    /// These invariants are very important to ensure that we can build
    /// optimization pipelines on top of the CGSCC pass manager which
    /// intelligently update the RefSCC graph without invalidating other parts
    /// of the RefSCC graph.
    ///
    /// Note that we provide no routine to remove a *call* edge. Instead, you
    /// must first switch it to a ref edge using \c switchInternalEdgeToRef.
    /// This split API is intentional as each of these two steps can invalidate
    /// a different aspect of the graph structure and needs to have the
    /// invalidation handled independently.
    ///
    /// The runtime complexity of this method is, in the worst case, O(V+E)
    /// where V is the number of nodes in this RefSCC and E is the number of
    /// edges leaving the nodes in this RefSCC. Note that E includes both edges
    /// within this RefSCC and edges from this RefSCC to child RefSCCs. Some
    /// effort has been made to minimize the overhead of common cases such as
    /// self-edges and edge removals which result in a spanning tree with no
    /// more cycles.
    [[nodiscard]] SmallVector<RefSCC *, 1>
    removeInternalRefEdges(ArrayRef<std::pair<Node *, Node *>> Edges);
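
    // Illustrative sketch of removing a call edge, per the note above. `RC`
    // is this RefSCC; `SourceN` and `TargetN` are hypothetical nodes in the
    // same SCC.
    //
    //   RC.switchInternalEdgeToRef(SourceN, TargetN);
    //   auto NewRCs = RC.removeInternalRefEdges({{&SourceN, &TargetN}});
    //   // NewRCs lists any newly formed RefSCCs in postorder.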

    /// A convenience wrapper around the above to handle trivial cases of
    /// inserting a new call edge.
    ///
    /// This is trivial whenever the target is in the same SCC as the source or
    /// the edge is an outgoing edge to some descendant SCC. In these cases
    /// there is no change to the cyclic structure of SCCs or RefSCCs.
    ///
    /// To further make calling this convenient, it also handles inserting
    /// already existing edges.
    void insertTrivialCallEdge(Node &SourceN, Node &TargetN);

    /// A convenience wrapper around the above to handle trivial cases of
    /// inserting a new ref edge.
    ///
    /// This is trivial whenever the target is in the same RefSCC as the source
    /// or the edge is an outgoing edge to some descendant RefSCC. In these
    /// cases there is no change to the cyclic structure of the RefSCCs.
    ///
    /// To further make calling this convenient, it also handles inserting
    /// already existing edges.
    void insertTrivialRefEdge(Node &SourceN, Node &TargetN);

    /// Directly replace a node's function with a new function.
    ///
    /// This should be used when moving the body and users of a function to
    /// a new formal function object but not otherwise changing the call graph
    /// structure in any way.
    ///
    /// It requires that the old function in the provided node has zero uses
    /// and that the new function has calls and references to it establishing
    /// an equivalent graph.
    void replaceNodeFunction(Node &N, Function &NewF);

    ///@}
  };

  /// A post-order depth-first RefSCC iterator over the call graph.
  ///
  /// This iterator walks the cached post-order sequence of RefSCCs. However,
  /// it trades stability for flexibility. It is restricted to a forward
  /// iterator but will survive mutations which insert new RefSCCs and continue
  /// to point to the same RefSCC even if it moves in the post-order sequence.
  class postorder_ref_scc_iterator
      : public iterator_facade_base<postorder_ref_scc_iterator,
                                    std::forward_iterator_tag, RefSCC> {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;

    /// Nonce type to select the constructor for the end iterator.
    struct IsAtEndT {};

    LazyCallGraph *G;
    RefSCC *RC = nullptr;

    /// Build the begin iterator for the graph.
    postorder_ref_scc_iterator(LazyCallGraph &G) : G(&G), RC(getRC(G, 0)) {
      incrementUntilNonEmptyRefSCC();
    }

    /// Build the end iterator for the graph. This is selected purely by
    /// overload.
    postorder_ref_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/) : G(&G) {}

    /// Get the post-order RefSCC at the given index of the postorder walk,
    /// populating it if necessary.
    static RefSCC *getRC(LazyCallGraph &G, int Index) {
      if (Index == (int)G.PostOrderRefSCCs.size())
        // We're at the end.
        return nullptr;

      return G.PostOrderRefSCCs[Index];
    }

    // Keep incrementing until RC is non-empty (or null).
    void incrementUntilNonEmptyRefSCC() {
      while (RC && RC->size() == 0)
        increment();
    }

    void increment() {
      assert(RC && "Cannot increment the end iterator!");
      RC = getRC(*G, G->RefSCCIndices.find(RC)->second + 1);
    }

  public:
    bool operator==(const postorder_ref_scc_iterator &Arg) const {
      return G == Arg.G && RC == Arg.RC;
    }

    reference operator*() const { return *RC; }

    using iterator_facade_base::operator++;
    postorder_ref_scc_iterator &operator++() {
      increment();
      incrementUntilNonEmptyRefSCC();
      return *this;
    }
  };

  /// Construct a graph for the given module.
  ///
  /// This sets up the graph and computes all of the entry points of the graph.
  /// No function definitions are scanned until their nodes in the graph are
  /// requested during traversal.
  LazyCallGraph(Module &M,
                function_ref<TargetLibraryInfo &(Function &)> GetTLI);

  LazyCallGraph(LazyCallGraph &&G);
  LazyCallGraph &operator=(LazyCallGraph &&RHS);

#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
  /// Verify that every RefSCC is valid.
  void verify();
#endif

  bool invalidate(Module &, const PreservedAnalyses &PA,
                  ModuleAnalysisManager::Invalidator &);

  EdgeSequence::iterator begin() { return EntryEdges.begin(); }
  EdgeSequence::iterator end() { return EntryEdges.end(); }

  void buildRefSCCs();

  postorder_ref_scc_iterator postorder_ref_scc_begin() {
    if (!EntryEdges.empty())
      assert(!PostOrderRefSCCs.empty() &&
             "Must form RefSCCs before iterating them!");
    return postorder_ref_scc_iterator(*this);
  }
  postorder_ref_scc_iterator postorder_ref_scc_end() {
    if (!EntryEdges.empty())
      assert(!PostOrderRefSCCs.empty() &&
             "Must form RefSCCs before iterating them!");
    return postorder_ref_scc_iterator(*this,
                                      postorder_ref_scc_iterator::IsAtEndT());
  }

  iterator_range<postorder_ref_scc_iterator> postorder_ref_sccs() {
    return make_range(postorder_ref_scc_begin(), postorder_ref_scc_end());
  }
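
  // Illustrative sketch only: a full bottom-up walk of the module, assuming
  // `CG` is this graph and `visit` is a hypothetical callback. RefSCCs must
  // be built before iterating them.
  //
  //   CG.buildRefSCCs();
  //   for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
  //     for (LazyCallGraph::SCC &C : RC)
  //       for (LazyCallGraph::Node &N : C)
  //         visit(N.getFunction());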

  /// Lookup a function in the graph which has already been scanned and added.
  Node *lookup(const Function &F) const { return NodeMap.lookup(&F); }

  /// Lookup a function's SCC in the graph.
  ///
  /// \returns null if the function hasn't been assigned an SCC via the RefSCC
  /// iterator walk.
  SCC *lookupSCC(Node &N) const { return SCCMap.lookup(&N); }

  /// Lookup a function's RefSCC in the graph.
  ///
  /// \returns null if the function hasn't been assigned a RefSCC via the
  /// RefSCC iterator walk.
  RefSCC *lookupRefSCC(Node &N) const {
    if (SCC *C = lookupSCC(N))
      return &C->getOuterRefSCC();

    return nullptr;
  }

  /// Get a graph node for a given function, scanning it to populate the graph
  /// data as necessary.
  Node &get(Function &F) {
    Node *&N = NodeMap[&F];
    if (N)
      return *N;

    return insertInto(F, N);
  }

  /// Get the sequence of known and defined library functions.
  ///
  /// These functions, because they are known to LLVM, can have calls
  /// introduced out of thin air from arbitrary IR.
  ArrayRef<Function *> getLibFunctions() const {
    return LibFunctions.getArrayRef();
  }

  /// Test whether a function is a known and defined library function tracked
  /// by the call graph.
  ///
  /// Because these functions are known to LLVM they are specially modeled in
  /// the call graph and even when all IR-level references have been removed
  /// remain active and reachable.
  bool isLibFunction(Function &F) const { return LibFunctions.count(&F); }

  ///@{
  /// \name Pre-SCC Mutation API
  ///
  /// These methods are only valid to call prior to forming any SCCs for this
  /// call graph. They can be used to update the core node-graph during
  /// a node-based inorder traversal that precedes any SCC-based traversal.
  ///
  /// Once you begin manipulating a call graph's SCCs, most mutation of the
  /// graph must be performed via a RefSCC method. There are some exceptions
  /// below.

  /// Update the call graph after inserting a new edge.
  void insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);

  /// Update the call graph after inserting a new edge.
  void insertEdge(Function &Source, Function &Target, Edge::Kind EK) {
    return insertEdge(get(Source), get(Target), EK);
  }

  /// Update the call graph after deleting an edge.
  void removeEdge(Node &SourceN, Node &TargetN);

  /// Update the call graph after deleting an edge.
  void removeEdge(Function &Source, Function &Target) {
    return removeEdge(get(Source), get(Target));
  }

  ///@}

  ///@{
  /// \name General Mutation API
  ///
  /// There is a very limited set of mutations allowed on the graph as a whole
  /// once SCCs have started to be formed. These routines have strict contracts
  /// but may be called at any point.

  /// Remove dead functions from the call graph.
  ///
  /// These functions should have already been passed to markDeadFunction().
  /// This is done as a batch to prevent compile time blowup as a result of
  /// handling a single function at a time.
  void removeDeadFunctions(ArrayRef<Function *> DeadFs);

  /// Mark a function as dead to be removed later by removeDeadFunctions().
  ///
  /// The function body should have no incoming or outgoing call or ref edges.
  /// For example, a function with a single "unreachable" instruction.
  void markDeadFunction(Function &F);
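
  // Illustrative sketch of the intended batch usage. `CG` is this graph and
  // `DeadFs` is a hypothetical list of functions already reduced to a single
  // unreachable instruction.
  //
  //   for (Function *DeadF : DeadFs)
  //     CG.markDeadFunction(*DeadF);
  //   CG.removeDeadFunctions(DeadFs);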

  /// Add a new function split/outlined from an existing function.
  ///
  /// The new function may only reference other functions that the original
  /// function did.
  ///
  /// The original function must reference (either directly or indirectly) the
  /// new function.
  ///
  /// The new function may also reference the original function.
  /// It may end up in a parent SCC in the case that the original function's
  /// edge to the new function is a ref edge, and the edge back is a call edge.
  void addSplitFunction(Function &OriginalFunction, Function &NewFunction);

  /// Add new ref-recursive functions split/outlined from an existing function.
  ///
  /// The new functions may only reference other functions that the original
  /// function did. The new functions may reference (not call) the original
  /// function.
  ///
  /// The original function must reference (not call) all new functions.
  /// All new functions must reference (not call) each other.
  void addSplitRefRecursiveFunctions(Function &OriginalFunction,
                                     ArrayRef<Function *> NewFunctions);

  ///@}

  ///@{
  /// \name Static helpers for code doing updates to the call graph.
  ///
  /// These helpers are used to implement parts of the call graph but are also
  /// useful to code doing updates or otherwise wanting to walk the IR in the
  /// same patterns as when we build the call graph.

  /// Recursively visits the defined functions whose address is reachable from
  /// every constant in the \p Worklist.
  ///
  /// Doesn't recurse through any constants already in the \p Visited set, and
  /// updates that set with every constant visited.
  ///
  /// For each defined function, calls \p Callback with that function.
  static void visitReferences(SmallVectorImpl<Constant *> &Worklist,
                              SmallPtrSetImpl<Constant *> &Visited,
                              function_ref<void(Function &)> Callback);
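
  // Illustrative sketch only: finding defined functions referenced from a
  // global variable's initializer. `GV` is a hypothetical GlobalVariable
  // with an initializer.
  //
  //   SmallVector<Constant *, 16> Worklist;
  //   SmallPtrSet<Constant *, 16> Visited;
  //   Worklist.push_back(GV.getInitializer());
  //   LazyCallGraph::visitReferences(Worklist, Visited, [](Function &F) {
  //     // F's address is reachable from the initializer.
  //   });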

  ///@}

private:
  using node_stack_iterator = SmallVectorImpl<Node *>::reverse_iterator;
  using node_stack_range = iterator_range<node_stack_iterator>;

  /// Allocator that holds all the call graph nodes.
  SpecificBumpPtrAllocator<Node> BPA;

  /// Maps function->node for fast lookup.
  DenseMap<const Function *, Node *> NodeMap;

  /// The entry edges into the graph.
  ///
  /// These edges are from "external" sources. Put another way, they
  /// escape at the module scope.
  EdgeSequence EntryEdges;

  /// Allocator that holds all the call graph SCCs.
  SpecificBumpPtrAllocator<SCC> SCCBPA;

  /// Maps Node -> SCC for fast lookup.
  DenseMap<Node *, SCC *> SCCMap;

  /// Allocator that holds all the call graph RefSCCs.
  SpecificBumpPtrAllocator<RefSCC> RefSCCBPA;

  /// The post-order sequence of RefSCCs.
  ///
  /// This list is lazily formed the first time we walk the graph.
  SmallVector<RefSCC *, 16> PostOrderRefSCCs;

  /// A map from RefSCC to the index for it in the postorder sequence of
  /// RefSCCs.
  DenseMap<RefSCC *, int> RefSCCIndices;

  /// Defined functions that are also known library functions which the
  /// optimizer can reason about and therefore might introduce calls to out of
  /// thin air.
  SmallSetVector<Function *, 4> LibFunctions;

  /// Helper to insert a new function, with an already looked-up entry in
  /// the NodeMap.
  Node &insertInto(Function &F, Node *&MappedN);

  /// Helper to initialize a new node created outside of creating SCCs and add
  /// it to the NodeMap if necessary. For example, useful when a function is
  /// split.
  Node &initNode(Function &F);

  /// Helper to update pointers back to the graph object during moves.
  void updateGraphPtrs();

  /// Allocates an SCC and constructs it using the graph allocator.
  ///
  /// The arguments are forwarded to the constructor.
  template <typename... Ts> SCC *createSCC(Ts &&...Args) {
    return new (SCCBPA.Allocate()) SCC(std::forward<Ts>(Args)...);
  }

  /// Allocates a RefSCC and constructs it using the graph allocator.
  ///
  /// The arguments are forwarded to the constructor.
  template <typename... Ts> RefSCC *createRefSCC(Ts &&...Args) {
    return new (RefSCCBPA.Allocate()) RefSCC(std::forward<Ts>(Args)...);
  }

  /// Common logic for building SCCs from a sequence of roots.
  ///
  /// This is a very generic implementation of the depth-first walk and SCC
  /// formation algorithm. It uses a generic sequence of roots and generic
  /// callbacks for each step. This is designed to be used to implement both
  /// the RefSCC formation and SCC formation with shared logic.
  ///
  /// Currently this is a relatively naive implementation of Tarjan's DFS
  /// algorithm to form the SCCs.
  ///
  /// FIXME: We should consider newer variants such as Nuutila.
  template <typename RootsT, typename GetBeginT, typename GetEndT,
            typename GetNodeT, typename FormSCCCallbackT>
  static void buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
                               GetEndT &&GetEnd, GetNodeT &&GetNode,
                               FormSCCCallbackT &&FormSCC);

  /// Build the SCCs for a RefSCC out of a list of nodes.
  void buildSCCs(RefSCC &RC, node_stack_range Nodes);

  /// Get the index of a RefSCC within the postorder traversal.
  ///
  /// Requires that this RefSCC is a valid one in the (perhaps partial)
  /// postorder traversed part of the graph.
  int getRefSCCIndex(RefSCC &RC) {
    auto IndexIt = RefSCCIndices.find(&RC);
    assert(IndexIt != RefSCCIndices.end() && "RefSCC doesn't have an index!");
    assert(PostOrderRefSCCs[IndexIt->second] == &RC &&
           "Index does not point back at RC!");
    return IndexIt->second;
  }
};

inline LazyCallGraph::Edge::Edge() = default;
inline LazyCallGraph::Edge::Edge(Node &N, Kind K) : Value(&N, K) {}

inline LazyCallGraph::Edge::operator bool() const {
  return Value.getPointer() && !Value.getPointer()->isDead();
}

inline LazyCallGraph::Edge::Kind LazyCallGraph::Edge::getKind() const {
  assert(*this && "Queried a null edge!");
  return Value.getInt();
}

inline bool LazyCallGraph::Edge::isCall() const {
  assert(*this && "Queried a null edge!");
  return getKind() == Call;
}

inline LazyCallGraph::Node &LazyCallGraph::Edge::getNode() const {
  assert(*this && "Queried a null edge!");
  return *Value.getPointer();
}

inline Function &LazyCallGraph::Edge::getFunction() const {
  assert(*this && "Queried a null edge!");
  return getNode().getFunction();
}

// Provide GraphTraits specializations for call graphs.
template <> struct GraphTraits<LazyCallGraph::Node *> {
  using NodeRef = LazyCallGraph::Node *;
  using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
};
template <> struct GraphTraits<LazyCallGraph *> {
  using NodeRef = LazyCallGraph::Node *;
  using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
};

/// An analysis pass which computes the call graph for a module.
class LazyCallGraphAnalysis : public AnalysisInfoMixin<LazyCallGraphAnalysis> {
  friend AnalysisInfoMixin<LazyCallGraphAnalysis>;

  static AnalysisKey Key;

public:
  /// Inform generic clients of the result type.
  using Result = LazyCallGraph;

  /// Compute the \c LazyCallGraph for the module \c M.
  ///
  /// This just builds the set of entry points to the call graph. The rest is
  /// built lazily as it is walked.
  LazyCallGraph run(Module &M, ModuleAnalysisManager &AM) {
    FunctionAnalysisManager &FAM =
        AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
    auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
      return FAM.getResult<TargetLibraryAnalysis>(F);
    };
    return LazyCallGraph(M, GetTLI);
  }
};
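
// Illustrative sketch only: obtaining the graph inside a module pass's run(),
// where `M` and `AM` are the usual Module and ModuleAnalysisManager
// parameters.
//
//   LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);
//   CG.buildRefSCCs();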

/// A pass which prints the call graph to a \c raw_ostream.
///
/// This is primarily useful for testing the analysis.
class LazyCallGraphPrinterPass
    : public PassInfoMixin<LazyCallGraphPrinterPass> {
  raw_ostream &OS;

public:
  explicit LazyCallGraphPrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

  static bool isRequired() { return true; }
};

/// A pass which prints the call graph as a DOT file to a \c raw_ostream.
///
/// This is primarily useful for visualization purposes.
class LazyCallGraphDOTPrinterPass
    : public PassInfoMixin<LazyCallGraphDOTPrinterPass> {
  raw_ostream &OS;

public:
  explicit LazyCallGraphDOTPrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

  static bool isRequired() { return true; }
};

extern template struct LLVM_TEMPLATE_ABI
    Any::TypeId<const LazyCallGraph::SCC *>;
} // end namespace llvm

#endif // LLVM_ANALYSIS_LAZYCALLGRAPH_H