//===- LazyCallGraph.cpp - Analysis of a Module's call graph --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "lcg"

void LazyCallGraph::EdgeSequence::insertEdgeInternal(Node &TargetN,
                                                     Edge::Kind EK) {
  EdgeIndexMap.insert({&TargetN, Edges.size()});
  Edges.emplace_back(TargetN, EK);
}

void LazyCallGraph::EdgeSequence::setEdgeKind(Node &TargetN, Edge::Kind EK) {
  Edges[EdgeIndexMap.find(&TargetN)->second].setKind(EK);
}

bool LazyCallGraph::EdgeSequence::removeEdgeInternal(Node &TargetN) {
  auto IndexMapI = EdgeIndexMap.find(&TargetN);
  if (IndexMapI == EdgeIndexMap.end())
    return false;

  Edges[IndexMapI->second] = Edge();
  EdgeIndexMap.erase(IndexMapI);
  return true;
}

static void addEdge(SmallVectorImpl<LazyCallGraph::Edge> &Edges,
                    DenseMap<LazyCallGraph::Node *, int> &EdgeIndexMap,
                    LazyCallGraph::Node &N, LazyCallGraph::Edge::Kind EK) {
  if (!EdgeIndexMap.insert({&N, Edges.size()}).second)
    return;

  LLVM_DEBUG(dbgs() << " Added callable function: " << N.getName() << "\n");
  Edges.emplace_back(LazyCallGraph::Edge(N, EK));
}

LazyCallGraph::EdgeSequence &LazyCallGraph::Node::populateSlow() {
  assert(!Edges && "Must not have already populated the edges for this node!");

  LLVM_DEBUG(dbgs() << " Adding functions called by '" << getName()
                    << "' to the graph.\n");

  Edges = EdgeSequence();

  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Function *, 4> Callees;
  SmallPtrSet<Constant *, 16> Visited;

  // Find all the potential call graph edges in this function. We track both
  // actual call edges and indirect references to functions. The direct calls
  // are trivially added, but to accumulate the latter we walk the instructions
  // and add every operand which is a constant to the worklist to process
  // afterward.
  //
  // Note that we consider *any* function with a definition to be a viable
  // edge. Even if the function's definition is subject to replacement by
  // some other module (say, a weak definition) there may still be
  // optimizations which essentially speculate based on the definition and
  // a way to check that the specific definition is in fact the one being
  // used.
  // For example, this could be done by moving the weak definition to
  // a strong (internal) definition and making the weak definition be an
  // alias. Then a test of the address of the weak function against the new
  // strong definition's address would be an effective way to determine the
  // safety of optimizing a direct call edge.
  for (BasicBlock &BB : *F)
    for (Instruction &I : BB) {
      if (auto CS = CallSite(&I))
        if (Function *Callee = CS.getCalledFunction())
          if (!Callee->isDeclaration())
            if (Callees.insert(Callee).second) {
              Visited.insert(Callee);
              addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*Callee),
                      LazyCallGraph::Edge::Call);
            }

      for (Value *Op : I.operand_values())
        if (Constant *C = dyn_cast<Constant>(Op))
          if (Visited.insert(C).second)
            Worklist.push_back(C);
    }

  // We've collected all the constant (and thus potentially function or
  // function containing) operands to all of the instructions in the function.
  // Process them (recursively) collecting every function found.
  visitReferences(Worklist, Visited, [&](Function &F) {
    addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(F),
            LazyCallGraph::Edge::Ref);
  });

  // Add implicit reference edges to any defined libcall functions (if we
  // haven't found an explicit edge).
  for (auto *F : G->LibFunctions)
    if (!Visited.count(F))
      addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*F),
              LazyCallGraph::Edge::Ref);

  return *Edges;
}

void LazyCallGraph::Node::replaceFunction(Function &NewF) {
  assert(F != &NewF && "Must not replace a function with itself!");
  F = &NewF;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LazyCallGraph::Node::dump() const {
  dbgs() << *this << '\n';
}
#endif

static bool isKnownLibFunction(Function &F, TargetLibraryInfo &TLI) {
  LibFunc LF;

  // Either this is a normal library function or a "vectorizable" function.
  return TLI.getLibFunc(F, LF) || TLI.isFunctionVectorizable(F.getName());
}

LazyCallGraph::LazyCallGraph(Module &M, TargetLibraryInfo &TLI) {
  LLVM_DEBUG(dbgs() << "Building CG for module: " << M.getModuleIdentifier()
                    << "\n");
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    // If this function is a known lib function to LLVM then we want to
    // synthesize reference edges to it to model the fact that LLVM can turn
    // arbitrary code into a library function call.
    if (isKnownLibFunction(F, TLI))
      LibFunctions.insert(&F);

    if (F.hasLocalLinkage())
      continue;

    // External linkage defined functions have edges to them from other
    // modules.
    LLVM_DEBUG(dbgs() << " Adding '" << F.getName()
                      << "' to entry set of the graph.\n");
    addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F), Edge::Ref);
  }

  // Externally visible aliases of internal functions are also viable entry
  // edges to the module.
  for (auto &A : M.aliases()) {
    if (A.hasLocalLinkage())
      continue;
    if (Function *F = dyn_cast<Function>(A.getAliasee())) {
      LLVM_DEBUG(dbgs() << " Adding '" << F->getName()
                        << "' with alias '" << A.getName()
                        << "' to entry set of the graph.\n");
      addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(*F), Edge::Ref);
    }
  }

  // Now add entry nodes for functions reachable via initializers to globals.
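  // For instance (an illustrative IR snippet, not taken from any particular
  // module), a global such as
  //   @handlers = internal constant [1 x void ()*] [void ()* @handle_foo]
  // references 'handle_foo' without any call instruction mentioning it, so
  // the graph models it with a reference edge from the entry set.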
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  for (GlobalVariable &GV : M.globals())
    if (GV.hasInitializer())
      if (Visited.insert(GV.getInitializer()).second)
        Worklist.push_back(GV.getInitializer());

  LLVM_DEBUG(
      dbgs() << " Adding functions referenced by global initializers to the "
                "entry set.\n");
  visitReferences(Worklist, Visited, [&](Function &F) {
    addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F),
            LazyCallGraph::Edge::Ref);
  });
}

LazyCallGraph::LazyCallGraph(LazyCallGraph &&G)
    : BPA(std::move(G.BPA)), NodeMap(std::move(G.NodeMap)),
      EntryEdges(std::move(G.EntryEdges)), SCCBPA(std::move(G.SCCBPA)),
      SCCMap(std::move(G.SCCMap)),
      LibFunctions(std::move(G.LibFunctions)) {
  updateGraphPtrs();
}

LazyCallGraph &LazyCallGraph::operator=(LazyCallGraph &&G) {
  BPA = std::move(G.BPA);
  NodeMap = std::move(G.NodeMap);
  EntryEdges = std::move(G.EntryEdges);
  SCCBPA = std::move(G.SCCBPA);
  SCCMap = std::move(G.SCCMap);
  LibFunctions = std::move(G.LibFunctions);
  updateGraphPtrs();
  return *this;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LazyCallGraph::SCC::dump() const {
  dbgs() << *this << '\n';
}
#endif

#ifndef NDEBUG
void LazyCallGraph::SCC::verify() {
  assert(OuterRefSCC && "Can't have a null RefSCC!");
  assert(!Nodes.empty() && "Can't have an empty SCC!");

  for (Node *N : Nodes) {
    assert(N && "Can't have a null node!");
    assert(OuterRefSCC->G->lookupSCC(*N) == this &&
           "Node does not map to this SCC!");
    assert(N->DFSNumber == -1 &&
           "Must set DFS numbers to -1 when adding a node to an SCC!");
    assert(N->LowLink == -1 &&
           "Must set low link to -1 when adding a node to an SCC!");
    for (Edge &E : **N)
      assert(E.getNode().isPopulated() && "Can't have an unpopulated node!");
  }
}
#endif

bool LazyCallGraph::SCC::isParentOf(const SCC &C) const {
  if (this == &C)
    return false;

  for (Node &N : *this)
    for (Edge &E : N->calls())
      if (OuterRefSCC->G->lookupSCC(E.getNode()) == &C)
        return true;

  // No edges found.
  return false;
}

bool LazyCallGraph::SCC::isAncestorOf(const SCC &TargetC) const {
  if (this == &TargetC)
    return false;

  LazyCallGraph &G = *OuterRefSCC->G;

  // Start with this SCC.
  SmallPtrSet<const SCC *, 16> Visited = {this};
  SmallVector<const SCC *, 16> Worklist = {this};

  // Walk down the graph until we run out of edges or find a path to TargetC.
  do {
    const SCC &C = *Worklist.pop_back_val();
    for (Node &N : C)
      for (Edge &E : N->calls()) {
        SCC *CalleeC = G.lookupSCC(E.getNode());
        if (!CalleeC)
          continue;

        // If the callee's SCC is the TargetC, we're done.
        if (CalleeC == &TargetC)
          return true;

        // If this is the first time we've reached this SCC, put it on the
        // worklist to recurse through.
        if (Visited.insert(CalleeC).second)
          Worklist.push_back(CalleeC);
      }
  } while (!Worklist.empty());

  // No paths found.
  return false;
}

LazyCallGraph::RefSCC::RefSCC(LazyCallGraph &G) : G(&G) {}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LazyCallGraph::RefSCC::dump() const {
  dbgs() << *this << '\n';
}
#endif

#ifndef NDEBUG
void LazyCallGraph::RefSCC::verify() {
  assert(G && "Can't have a null graph!");
  assert(!SCCs.empty() && "Can't have an empty SCC!");

  // Verify basic properties of the SCCs.
  SmallPtrSet<SCC *, 4> SCCSet;
  for (SCC *C : SCCs) {
    assert(C && "Can't have a null SCC!");
    C->verify();
    assert(&C->getOuterRefSCC() == this &&
           "SCC doesn't think it is inside this RefSCC!");
    bool Inserted = SCCSet.insert(C).second;
    assert(Inserted && "Found a duplicate SCC!");
    auto IndexIt = SCCIndices.find(C);
    assert(IndexIt != SCCIndices.end() &&
           "Found an SCC that doesn't have an index!");
  }

  // Check that our indices map correctly.
  for (auto &SCCIndexPair : SCCIndices) {
    SCC *C = SCCIndexPair.first;
    int i = SCCIndexPair.second;
    assert(C && "Can't have a null SCC in the indices!");
    assert(SCCSet.count(C) && "Found an index for an SCC not in the RefSCC!");
    assert(SCCs[i] == C && "Index doesn't point to SCC!");
  }

  // Check that the SCCs are in fact in post-order.
  for (int i = 0, Size = SCCs.size(); i < Size; ++i) {
    SCC &SourceSCC = *SCCs[i];
    for (Node &N : SourceSCC)
      for (Edge &E : *N) {
        if (!E.isCall())
          continue;
        SCC &TargetSCC = *G->lookupSCC(E.getNode());
        if (&TargetSCC.getOuterRefSCC() == this) {
          assert(SCCIndices.find(&TargetSCC)->second <= i &&
                 "Edge between SCCs violates post-order relationship.");
          continue;
        }
      }
  }
}
#endif

bool LazyCallGraph::RefSCC::isParentOf(const RefSCC &RC) const {
  if (&RC == this)
    return false;

  // Search all edges to see if this is a parent.
  for (SCC &C : *this)
    for (Node &N : C)
      for (Edge &E : *N)
        if (G->lookupRefSCC(E.getNode()) == &RC)
          return true;

  return false;
}

bool LazyCallGraph::RefSCC::isAncestorOf(const RefSCC &RC) const {
  if (&RC == this)
    return false;

  // For each descendant of this RefSCC, see if one of its children is the
  // argument. If not, add that descendant to the worklist and continue
  // searching.
  SmallVector<const RefSCC *, 4> Worklist;
  SmallPtrSet<const RefSCC *, 4> Visited;
  Worklist.push_back(this);
  Visited.insert(this);
  do {
    const RefSCC &DescendantRC = *Worklist.pop_back_val();
    for (SCC &C : DescendantRC)
      for (Node &N : C)
        for (Edge &E : *N) {
          auto *ChildRC = G->lookupRefSCC(E.getNode());
          if (ChildRC == &RC)
            return true;
          if (!ChildRC || !Visited.insert(ChildRC).second)
            continue;
          Worklist.push_back(ChildRC);
        }
  } while (!Worklist.empty());

  return false;
}

/// Generic helper that updates a postorder sequence of SCCs for a potentially
/// cycle-introducing edge insertion.
///
/// A postorder sequence of SCCs of a directed graph has one fundamental
/// property: all edges in the DAG of SCCs point "up" the sequence. That is,
/// all edges in the SCC DAG point to prior SCCs in the sequence.
///
/// This routine both updates a postorder sequence and uses that sequence to
/// compute the set of SCCs connected into a cycle.
/// It should only be called to insert a "downward" edge which will require
/// changing the sequence to restore it to a postorder.
///
/// When inserting an edge from an earlier SCC to a later SCC in some postorder
/// sequence, all of the SCCs which may be impacted are in the closed range of
/// those two within the postorder sequence. The algorithm used here to restore
/// the state is as follows:
///
/// 1) Starting from the source SCC, construct a set of SCCs which reach the
///    source SCC, consisting initially of just the source SCC. Then scan
///    toward the target SCC in postorder and for each SCC, if it has an edge
///    to an SCC in the set, add it to the set. Otherwise, it does not reach
///    the source SCC, so move it in the postorder sequence to immediately
///    before the source SCC, shifting the source SCC and all SCCs in the set
///    one position toward the target SCC. Stop scanning after processing the
///    target SCC.
/// 2) If the source SCC is now past the target SCC in the postorder sequence,
///    and thus the new edge will flow toward the start, we are done.
/// 3) Otherwise, starting from the target SCC, walk all edges which reach an
///    SCC between the source and the target, and add them to the set of
///    connected SCCs, then recurse through them. Once a complete set of the
///    SCCs the target connects to is known, hoist the remaining SCCs between
///    the source and the target to be above the target. Note that there is no
///    need to process the source SCC, it is already known to connect.
/// 4) At this point, all of the SCCs in the closed range between the source
///    SCC and the target SCC in the postorder sequence are connected,
///    including the target SCC and the source SCC. Inserting the edge from
///    the source SCC to the target SCC will form a cycle out of precisely
///    these SCCs. Thus we can merge all of the SCCs in this closed range into
///    a single SCC.
///
/// This process has various important properties:
/// - Only mutates the SCCs when adding the edge actually changes the SCC
///   structure.
/// - Never mutates SCCs which are unaffected by the change.
/// - Updates the postorder sequence to correctly satisfy the postorder
///   constraint after the edge is inserted.
/// - Only reorders SCCs in the closed postorder range from the source to the
///   target, so it is easy to bound how much of the ordering has changed.
/// - Big-O is the number of edges in the closed postorder range of SCCs from
///   source to target.
///
/// This helper routine, in addition to updating the postorder sequence itself
/// will also update a map from SCCs to indices within that sequence.
///
/// The sequence and the map must operate on pointers to the SCC type.
///
/// Two callbacks must be provided. The first computes the subset of SCCs in
/// the postorder closed range from the source to the target which connect to
/// the source SCC via some (transitive) set of edges. The second computes the
/// subset of the same range which the target SCC connects to via some
/// (transitive) set of edges. Both callbacks should populate the set argument
/// provided.
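///
/// As an illustrative sketch (the SCC names here are hypothetical, not taken
/// from this file): given the postorder sequence [S, X, Y, T] and a new edge
/// from S to T, step (1) moves whichever of X and Y does not reach S to just
/// before S, and step (3) hoists whichever of the remaining SCCs T does not
/// reach to just after T. Whatever is left in the closed range from S to T
/// lies on a path from T back to S and, together with the new S->T edge,
/// forms the cycle that the caller merges.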
template <typename SCCT, typename PostorderSequenceT, typename SCCIndexMapT,
          typename ComputeSourceConnectedSetCallableT,
          typename ComputeTargetConnectedSetCallableT>
static iterator_range<typename PostorderSequenceT::iterator>
updatePostorderSequenceForEdgeInsertion(
    SCCT &SourceSCC, SCCT &TargetSCC, PostorderSequenceT &SCCs,
    SCCIndexMapT &SCCIndices,
    ComputeSourceConnectedSetCallableT ComputeSourceConnectedSet,
    ComputeTargetConnectedSetCallableT ComputeTargetConnectedSet) {
  int SourceIdx = SCCIndices[&SourceSCC];
  int TargetIdx = SCCIndices[&TargetSCC];
  assert(SourceIdx < TargetIdx && "Cannot have equal indices here!");

  SmallPtrSet<SCCT *, 4> ConnectedSet;

  // Compute the SCCs which (transitively) reach the source.
  ComputeSourceConnectedSet(ConnectedSet);

  // Partition the SCCs in this part of the postorder sequence so only SCCs
  // connecting to the source remain between it and the target. This is
  // a benign partition as it preserves postorder.
  auto SourceI = std::stable_partition(
      SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx + 1,
      [&ConnectedSet](SCCT *C) { return !ConnectedSet.count(C); });
  for (int i = SourceIdx, e = TargetIdx + 1; i < e; ++i)
    SCCIndices.find(SCCs[i])->second = i;

  // If the target doesn't connect to the source, then we've corrected the
  // post-order and there are no cycles formed.
  if (!ConnectedSet.count(&TargetSCC)) {
    assert(SourceI > (SCCs.begin() + SourceIdx) &&
           "Must have moved the source to fix the post-order.");
    assert(*std::prev(SourceI) == &TargetSCC &&
           "Last SCC to move should have been the target.");

    // Return an empty range at the target SCC indicating there is nothing to
    // merge.
    return make_range(std::prev(SourceI), std::prev(SourceI));
  }

  assert(SCCs[TargetIdx] == &TargetSCC &&
         "Should not have moved target if connected!");
  SourceIdx = SourceI - SCCs.begin();
  assert(SCCs[SourceIdx] == &SourceSCC &&
         "Bad updated index computation for the source SCC!");

  // See whether there are any remaining intervening SCCs between the source
  // and target. If so we need to make sure they all are reachable from the
  // target.
  if (SourceIdx + 1 < TargetIdx) {
    ConnectedSet.clear();
    ComputeTargetConnectedSet(ConnectedSet);

    // Partition SCCs so that only SCCs reached from the target remain between
    // the source and the target. This preserves postorder.
    auto TargetI = std::stable_partition(
        SCCs.begin() + SourceIdx + 1, SCCs.begin() + TargetIdx + 1,
        [&ConnectedSet](SCCT *C) { return ConnectedSet.count(C); });
    for (int i = SourceIdx + 1, e = TargetIdx + 1; i < e; ++i)
      SCCIndices.find(SCCs[i])->second = i;
    TargetIdx = std::prev(TargetI) - SCCs.begin();
    assert(SCCs[TargetIdx] == &TargetSCC &&
           "Should always end with the target!");
  }

  // At this point, we know that connecting source to target forms a cycle
  // because target connects back to source, and we know that all of the SCCs
  // between the source and target in the postorder sequence participate in
  // that cycle.
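  // Note that the returned range includes the source but deliberately stops
  // short of the target: callers merge the SCCs in the range *into* the
  // target SCC rather than forming a new one.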
  return make_range(SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx);
}

bool
LazyCallGraph::RefSCC::switchInternalEdgeToCall(
    Node &SourceN, Node &TargetN,
    function_ref<void(ArrayRef<SCC *> MergeSCCs)> MergeCB) {
  assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
  SmallVector<SCC *, 1> DeletedSCCs;

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and when this
  // routine finishes.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
#endif

  SCC &SourceSCC = *G->lookupSCC(SourceN);
  SCC &TargetSCC = *G->lookupSCC(TargetN);

  // If the two nodes are already part of the same SCC, we're also done as
  // we've just added more connectivity.
  if (&SourceSCC == &TargetSCC) {
    SourceN->setEdgeKind(TargetN, Edge::Call);
    return false; // No new cycle.
  }

  // At this point we leverage the postorder list of SCCs to detect when the
  // insertion of an edge changes the SCC structure in any way.
  //
  // First and foremost, we can eliminate the need for any changes when the
  // edge is toward the beginning of the postorder sequence because all edges
  // flow in that direction already. Thus adding a new one cannot form a cycle.
  int SourceIdx = SCCIndices[&SourceSCC];
  int TargetIdx = SCCIndices[&TargetSCC];
  if (TargetIdx < SourceIdx) {
    SourceN->setEdgeKind(TargetN, Edge::Call);
    return false; // No new cycle.
  }

  // Compute the SCCs which (transitively) reach the source.
  auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
#ifndef NDEBUG
    // Check that the RefSCC is still valid before computing this as the
    // results will be nonsensical if we've broken its invariants.
    verify();
#endif
    ConnectedSet.insert(&SourceSCC);
    auto IsConnected = [&](SCC &C) {
      for (Node &N : C)
        for (Edge &E : N->calls())
          if (ConnectedSet.count(G->lookupSCC(E.getNode())))
            return true;

      return false;
    };

    for (SCC *C :
         make_range(SCCs.begin() + SourceIdx + 1, SCCs.begin() + TargetIdx + 1))
      if (IsConnected(*C))
        ConnectedSet.insert(C);
  };

  // Use a normal worklist to find which SCCs the target connects to. We still
  // bound the search based on the range in the postorder list we care about,
  // but because this is forward connectivity we just "recurse" through the
  // edges.
  auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
#ifndef NDEBUG
    // Check that the RefSCC is still valid before computing this as the
    // results will be nonsensical if we've broken its invariants.
    verify();
#endif
    ConnectedSet.insert(&TargetSCC);
    SmallVector<SCC *, 4> Worklist;
    Worklist.push_back(&TargetSCC);
    do {
      SCC &C = *Worklist.pop_back_val();
      for (Node &N : C)
        for (Edge &E : *N) {
          if (!E.isCall())
            continue;
          SCC &EdgeC = *G->lookupSCC(E.getNode());
          if (&EdgeC.getOuterRefSCC() != this)
            // Not in this RefSCC...
            continue;
          if (SCCIndices.find(&EdgeC)->second <= SourceIdx)
            // Not in the postorder sequence between source and target.
            continue;

          if (ConnectedSet.insert(&EdgeC).second)
            Worklist.push_back(&EdgeC);
        }
    } while (!Worklist.empty());
  };

  // Use a generic helper to update the postorder sequence of SCCs and return
  // a range of any SCCs connected into a cycle by inserting this edge.
  // This routine will also take care of updating the indices into the
  // postorder sequence.
  auto MergeRange = updatePostorderSequenceForEdgeInsertion(
      SourceSCC, TargetSCC, SCCs, SCCIndices, ComputeSourceConnectedSet,
      ComputeTargetConnectedSet);

  // Run the user's callback on the merged SCCs before we actually merge them.
  if (MergeCB)
    MergeCB(makeArrayRef(MergeRange.begin(), MergeRange.end()));

  // If the merge range is empty, then adding the edge didn't actually form any
  // new cycles. We're done.
  if (empty(MergeRange)) {
    // Now that the SCC structure is finalized, flip the kind to call.
    SourceN->setEdgeKind(TargetN, Edge::Call);
    return false; // No new cycle.
  }

#ifndef NDEBUG
  // Before merging, check that the RefSCC remains valid after all the
  // postorder updates.
  verify();
#endif

  // Otherwise we need to merge all of the SCCs in the cycle into a single
  // result SCC.
  //
  // NB: We merge into the target because all of these functions were already
  // reachable from the target, meaning any SCC-wide properties deduced about
  // it other than the set of functions within it will not have changed.
  for (SCC *C : MergeRange) {
    assert(C != &TargetSCC &&
           "We merge *into* the target and shouldn't process it here!");
    SCCIndices.erase(C);
    TargetSCC.Nodes.append(C->Nodes.begin(), C->Nodes.end());
    for (Node *N : C->Nodes)
      G->SCCMap[N] = &TargetSCC;
    C->clear();
    DeletedSCCs.push_back(C);
  }

  // Erase the merged SCCs from the list and update the indices of the
  // remaining SCCs.
  int IndexOffset = MergeRange.end() - MergeRange.begin();
  auto EraseEnd = SCCs.erase(MergeRange.begin(), MergeRange.end());
  for (SCC *C : make_range(EraseEnd, SCCs.end()))
    SCCIndices[C] -= IndexOffset;

  // Now that the SCC structure is finalized, flip the kind to call.
  SourceN->setEdgeKind(TargetN, Edge::Call);

  // And we're done, but we did form a new cycle.
  return true;
}

void LazyCallGraph::RefSCC::switchTrivialInternalEdgeToRef(Node &SourceN,
                                                           Node &TargetN) {
  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and when this
  // routine finishes.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
#endif

  assert(G->lookupRefSCC(SourceN) == this &&
         "Source must be in this RefSCC.");
  assert(G->lookupRefSCC(TargetN) == this &&
         "Target must be in this RefSCC.");
  assert(G->lookupSCC(SourceN) != G->lookupSCC(TargetN) &&
         "Source and Target must be in separate SCCs for this to be trivial!");

  // Set the edge kind.
  SourceN->setEdgeKind(TargetN, Edge::Ref);
}

iterator_range<LazyCallGraph::RefSCC::iterator>
LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) {
  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and when this
  // routine finishes.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
#endif

  assert(G->lookupRefSCC(SourceN) == this &&
         "Source must be in this RefSCC.");
  assert(G->lookupRefSCC(TargetN) == this &&
         "Target must be in this RefSCC.");

  SCC &TargetSCC = *G->lookupSCC(TargetN);
  assert(G->lookupSCC(SourceN) == &TargetSCC && "Source and Target must be in "
                                                "the same SCC to require the "
                                                "full CG update.");

  // Set the edge kind.
  SourceN->setEdgeKind(TargetN, Edge::Ref);

  // Otherwise we are removing a call edge from a single SCC. This may break
  // the cycle. In order to compute the new set of SCCs, we need to do a small
  // DFS over the nodes within the SCC to form any sub-cycles that remain as
  // distinct SCCs and compute a postorder over the resulting SCCs.
  //
  // However, we specially handle the target node. The target node is known to
  // reach all other nodes in the original SCC by definition. This means that
  // we want the old SCC to be replaced with an SCC containing that node as it
  // will be the root of whatever SCC DAG results from the DFS. Assumptions
  // about an SCC such as the set of functions called will continue to hold,
  // etc.

  SCC &OldSCC = TargetSCC;
  SmallVector<std::pair<Node *, EdgeSequence::call_iterator>, 16> DFSStack;
  SmallVector<Node *, 16> PendingSCCStack;
  SmallVector<SCC *, 4> NewSCCs;

  // Prepare the nodes for a fresh DFS.
  SmallVector<Node *, 16> Worklist;
  Worklist.swap(OldSCC.Nodes);
  for (Node *N : Worklist) {
    N->DFSNumber = N->LowLink = 0;
    G->SCCMap.erase(N);
  }

  // Force the target node to be in the old SCC. This also enables us to take
  // a very significant short-cut in the standard Tarjan walk to re-form SCCs
  // below: whenever we build an edge that reaches the target node, we know
  // that the target node eventually connects back to all other nodes in our
  // walk. As a consequence, we can detect and handle participants in that
  // cycle without walking all the edges that form this connection, and instead
  // by relying on the fundamental guarantee coming into this operation (all
  // nodes are reachable from the target due to previously forming an SCC).
  TargetN.DFSNumber = TargetN.LowLink = -1;
  OldSCC.Nodes.push_back(&TargetN);
  G->SCCMap[&TargetN] = &OldSCC;

  // Scan down the stack and DFS across the call edges.
  for (Node *RootN : Worklist) {
    assert(DFSStack.empty() &&
           "Cannot begin a new root with a non-empty DFS stack!");
    assert(PendingSCCStack.empty() &&
           "Cannot begin a new root with pending nodes for an SCC!");

    // Skip any nodes we've already reached in the DFS.
    if (RootN->DFSNumber != 0) {
      assert(RootN->DFSNumber == -1 &&
             "Shouldn't have any mid-DFS root nodes!");
      continue;
    }

    RootN->DFSNumber = RootN->LowLink = 1;
    int NextDFSNumber = 2;

    DFSStack.push_back({RootN, (*RootN)->call_begin()});
    do {
      Node *N;
      EdgeSequence::call_iterator I;
      std::tie(N, I) = DFSStack.pop_back_val();
      auto E = (*N)->call_end();
      while (I != E) {
        Node &ChildN = I->getNode();
        if (ChildN.DFSNumber == 0) {
          // We haven't yet visited this child, so descend, pushing the current
          // node onto the stack.
          DFSStack.push_back({N, I});

          assert(!G->SCCMap.count(&ChildN) &&
                 "Found a node with 0 DFS number but already in an SCC!");
          ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
          N = &ChildN;
          I = (*N)->call_begin();
          E = (*N)->call_end();
          continue;
        }

        // Check for the child already being part of some component.
        if (ChildN.DFSNumber == -1) {
          if (G->lookupSCC(ChildN) == &OldSCC) {
            // If the child is part of the old SCC, we know that it can reach
            // every other node, so we have formed a cycle. Pull the entire DFS
            // and pending stacks into it. See the comment above about setting
            // up the old SCC for why we do this.
            int OldSize = OldSCC.size();
            OldSCC.Nodes.push_back(N);
            OldSCC.Nodes.append(PendingSCCStack.begin(), PendingSCCStack.end());
            PendingSCCStack.clear();
            while (!DFSStack.empty())
              OldSCC.Nodes.push_back(DFSStack.pop_back_val().first);
            for (Node &N : make_range(OldSCC.begin() + OldSize, OldSCC.end())) {
              N.DFSNumber = N.LowLink = -1;
              G->SCCMap[&N] = &OldSCC;
            }
            N = nullptr;
            break;
          }

          // If the child has already been added to some child component, it
          // couldn't impact the low-link of this parent because it isn't
          // connected, and thus its low-link isn't relevant so skip it.
          ++I;
          continue;
        }

        // Track the lowest linked child as the lowest link for this node.
        assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
        if (ChildN.LowLink < N->LowLink)
          N->LowLink = ChildN.LowLink;

        // Move to the next edge.
        ++I;
      }
      if (!N)
        // Cleared the DFS early, start another round.
        break;

      // We've finished processing N and its descendants, put it on our pending
      // SCC stack to eventually get merged into an SCC of nodes.
      PendingSCCStack.push_back(N);

      // If this node is linked to some lower entry, continue walking up the
      // stack.
      if (N->LowLink != N->DFSNumber)
        continue;

      // Otherwise, we've completed an SCC. Append it to our post order list of
      // SCCs.
      int RootDFSNumber = N->DFSNumber;
      // Find the range of the node stack by walking down until we pass the
      // root DFS number.
      auto SCCNodes = make_range(
          PendingSCCStack.rbegin(),
          find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
            return N->DFSNumber < RootDFSNumber;
          }));

      // Form a new SCC out of these nodes and then clear them off our pending
      // stack.
      NewSCCs.push_back(G->createSCC(*this, SCCNodes));
      for (Node &N : *NewSCCs.back()) {
        N.DFSNumber = N.LowLink = -1;
        G->SCCMap[&N] = NewSCCs.back();
      }
      PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
    } while (!DFSStack.empty());
  }

  // Insert the remaining SCCs before the old one. The old SCC can reach all
  // other SCCs we form because it contains the target node of the removed edge
  // of the old SCC. This means that we will have edges into all of the new
  // SCCs, which means the old one must come last for postorder.
  int OldIdx = SCCIndices[&OldSCC];
  SCCs.insert(SCCs.begin() + OldIdx, NewSCCs.begin(), NewSCCs.end());

  // Update the mapping from SCC* to index to use the new SCC*s, and remove the
  // old SCC from the mapping.
  for (int Idx = OldIdx, Size = SCCs.size(); Idx < Size; ++Idx)
    SCCIndices[SCCs[Idx]] = Idx;

  return make_range(SCCs.begin() + OldIdx,
                    SCCs.begin() + OldIdx + NewSCCs.size());
}

void LazyCallGraph::RefSCC::switchOutgoingEdgeToCall(Node &SourceN,
                                                     Node &TargetN) {
  assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");

  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
  assert(G->lookupRefSCC(TargetN) != this &&
         "Target must not be in this RefSCC.");
#ifdef EXPENSIVE_CHECKS
  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
         "Target must be a descendant of the Source.");
#endif

  // Edges between RefSCCs are the same regardless of call or ref, so we can
  // just flip the edge here.
  SourceN->setEdgeKind(TargetN, Edge::Call);

#ifndef NDEBUG
  // Check that the RefSCC is still valid.
  verify();
#endif
}

void LazyCallGraph::RefSCC::switchOutgoingEdgeToRef(Node &SourceN,
                                                    Node &TargetN) {
  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");

  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
  assert(G->lookupRefSCC(TargetN) != this &&
         "Target must not be in this RefSCC.");
#ifdef EXPENSIVE_CHECKS
  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
         "Target must be a descendant of the Source.");
#endif

  // Edges between RefSCCs are the same regardless of call or ref, so we can
  // just flip the edge here.
  SourceN->setEdgeKind(TargetN, Edge::Ref);

#ifndef NDEBUG
  // Check that the RefSCC is still valid.
  verify();
#endif
}

void LazyCallGraph::RefSCC::insertInternalRefEdge(Node &SourceN,
                                                  Node &TargetN) {
  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
  assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");

  SourceN->insertEdgeInternal(TargetN, Edge::Ref);

#ifndef NDEBUG
  // Check that the RefSCC is still valid.
  verify();
#endif
}

void LazyCallGraph::RefSCC::insertOutgoingEdge(Node &SourceN, Node &TargetN,
                                               Edge::Kind EK) {
  // First insert it into the caller.
  SourceN->insertEdgeInternal(TargetN, EK);

  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");

  assert(G->lookupRefSCC(TargetN) != this &&
         "Target must not be in this RefSCC.");
#ifdef EXPENSIVE_CHECKS
  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
         "Target must be a descendant of the Source.");
#endif

#ifndef NDEBUG
  // Check that the RefSCC is still valid.
  verify();
#endif
}

SmallVector<LazyCallGraph::RefSCC *, 1>
LazyCallGraph::RefSCC::insertIncomingRefEdge(Node &SourceN, Node &TargetN) {
  assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
  RefSCC &SourceC = *G->lookupRefSCC(SourceN);
  assert(&SourceC != this && "Source must not be in this RefSCC.");
#ifdef EXPENSIVE_CHECKS
  assert(SourceC.isDescendantOf(*this) &&
         "Source must be a descendant of the Target.");
#endif

  SmallVector<RefSCC *, 1> DeletedRefSCCs;

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and when this
  // routine finishes.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
#endif

  int SourceIdx = G->RefSCCIndices[&SourceC];
  int TargetIdx = G->RefSCCIndices[this];
  assert(SourceIdx < TargetIdx &&
         "Postorder list doesn't see edge as incoming!");

  // Compute the RefSCCs which (transitively) reach the source. We do this by
  // working backwards from the source using the parent set in each RefSCC,
  // skipping any RefSCCs that don't fall in the postorder range. This has the
  // advantage of walking the sparser parent edge (in high fan-out graphs) but
  // more importantly this removes examining all forward edges in all RefSCCs
  // within the postorder range which aren't in fact connected. Only connected
  // RefSCCs (and their edges) are visited here.
  auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
    Set.insert(&SourceC);
    auto IsConnected = [&](RefSCC &RC) {
      for (SCC &C : RC)
        for (Node &N : C)
          for (Edge &E : *N)
            if (Set.count(G->lookupRefSCC(E.getNode())))
              return true;

      return false;
    };

    for (RefSCC *C : make_range(G->PostOrderRefSCCs.begin() + SourceIdx + 1,
                                G->PostOrderRefSCCs.begin() + TargetIdx + 1))
      if (IsConnected(*C))
        Set.insert(C);
  };

  // Use a normal worklist to find which SCCs the target connects to. We still
  // bound the search based on the range in the postorder list we care about,
  // but because this is forward connectivity we just "recurse" through the
  // edges.
  auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
    Set.insert(this);
    SmallVector<RefSCC *, 4> Worklist;
    Worklist.push_back(this);
    do {
      RefSCC &RC = *Worklist.pop_back_val();
      for (SCC &C : RC)
        for (Node &N : C)
          for (Edge &E : *N) {
            RefSCC &EdgeRC = *G->lookupRefSCC(E.getNode());
            if (G->getRefSCCIndex(EdgeRC) <= SourceIdx)
              // Not in the postorder sequence between source and target.
              continue;

            if (Set.insert(&EdgeRC).second)
              Worklist.push_back(&EdgeRC);
          }
    } while (!Worklist.empty());
  };

  // Use a generic helper to update the postorder sequence of RefSCCs and
  // return a range of any RefSCCs connected into a cycle by inserting this
  // edge. This routine will also take care of updating the indices into the
  // postorder sequence.
  iterator_range<SmallVectorImpl<RefSCC *>::iterator> MergeRange =
      updatePostorderSequenceForEdgeInsertion(
          SourceC, *this, G->PostOrderRefSCCs, G->RefSCCIndices,
          ComputeSourceConnectedSet, ComputeTargetConnectedSet);

  // Build a set so we can do fast tests for whether a RefSCC will end up as
  // part of the merged RefSCC.
  SmallPtrSet<RefSCC *, 16> MergeSet(MergeRange.begin(), MergeRange.end());

  // This RefSCC will always be part of that set, so just insert it here.
  MergeSet.insert(this);

  // Now that we have identified all of the SCCs which need to be merged into
  // a connected set with the inserted edge, merge all of them into this SCC.
  SmallVector<SCC *, 16> MergedSCCs;
  int SCCIndex = 0;
  for (RefSCC *RC : MergeRange) {
    assert(RC != this && "We're merging into the target RefSCC, so it "
                         "shouldn't be in the range.");

    // Walk the inner SCCs to update their up-pointer and walk all the edges to
    // update any parent sets.
    // FIXME: We should try to find a way to avoid this (rather expensive) edge
    // walk by updating the parent sets in some other manner.
    for (SCC &InnerC : *RC) {
      InnerC.OuterRefSCC = this;
      SCCIndices[&InnerC] = SCCIndex++;
      for (Node &N : InnerC)
        G->SCCMap[&N] = &InnerC;
    }

    // Now merge in the SCCs. We can actually move here so try to reuse storage
    // the first time through.
    if (MergedSCCs.empty())
      MergedSCCs = std::move(RC->SCCs);
    else
      MergedSCCs.append(RC->SCCs.begin(), RC->SCCs.end());
    RC->SCCs.clear();
    DeletedRefSCCs.push_back(RC);
  }

  // Append our original SCCs to the merged list and move it into place.
  for (SCC &InnerC : *this)
    SCCIndices[&InnerC] = SCCIndex++;
  MergedSCCs.append(SCCs.begin(), SCCs.end());
  SCCs = std::move(MergedSCCs);

  // Remove the merged away RefSCCs from the post order sequence.
  for (RefSCC *RC : MergeRange)
    G->RefSCCIndices.erase(RC);
  int IndexOffset = MergeRange.end() - MergeRange.begin();
  auto EraseEnd =
      G->PostOrderRefSCCs.erase(MergeRange.begin(), MergeRange.end());
  for (RefSCC *RC : make_range(EraseEnd, G->PostOrderRefSCCs.end()))
    G->RefSCCIndices[RC] -= IndexOffset;

  // At this point we have a merged RefSCC with a post-order SCCs list, just
  // connect the nodes to form the new edge.
  SourceN->insertEdgeInternal(TargetN, Edge::Ref);

  // We return the list of RefSCCs which were merged so that callers can
  // invalidate any data they have associated with them. Note that these
  // RefSCCs are no longer in an interesting state (they are totally empty) but
  // the pointers will remain stable for the life of the graph itself.
  return DeletedRefSCCs;
}

void LazyCallGraph::RefSCC::removeOutgoingEdge(Node &SourceN, Node &TargetN) {
  assert(G->lookupRefSCC(SourceN) == this &&
         "The source must be a member of this RefSCC.");
  assert(G->lookupRefSCC(TargetN) != this &&
         "The target must not be a member of this RefSCC");

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and when this
  // routine finishes.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
#endif

  // First remove it from the node.
  bool Removed = SourceN->removeEdgeInternal(TargetN);
  (void)Removed;
  assert(Removed && "Target not in the edge set for this caller?");
}

SmallVector<LazyCallGraph::RefSCC *, 1>
LazyCallGraph::RefSCC::removeInternalRefEdge(Node &SourceN,
                                             ArrayRef<Node *> TargetNs) {
  // We return a list of the resulting *new* RefSCCs in post-order.
  SmallVector<RefSCC *, 1> Result;

#ifndef NDEBUG
  // In a debug build, verify the RefSCC is valid to start with and that either
  // we return an empty list of result RefSCCs and this RefSCC remains valid,
  // or we return new RefSCCs and this RefSCC is dead.
  verify();
  auto VerifyOnExit = make_scope_exit([&]() {
    // If we didn't replace our RefSCC with new ones, check that this one
    // remains valid.
    if (G)
      verify();
  });
#endif

  // First remove the actual edges.
  for (Node *TargetN : TargetNs) {
    assert(!(*SourceN)[*TargetN].isCall() &&
           "Cannot remove a call edge, it must first be made a ref edge");

    bool Removed = SourceN->removeEdgeInternal(*TargetN);
    (void)Removed;
    assert(Removed && "Target not in the edge set for this caller?");
  }

  // Direct self references don't impact the ref graph at all.
  if (llvm::all_of(TargetNs,
                   [&](Node *TargetN) { return &SourceN == TargetN; }))
    return Result;

  // If all targets are in the same SCC as the source, because no call edges
  // were removed there is no RefSCC structure change.
  SCC &SourceC = *G->lookupSCC(SourceN);
  if (llvm::all_of(TargetNs, [&](Node *TargetN) {
        return G->lookupSCC(*TargetN) == &SourceC;
      }))
    return Result;

  // We build somewhat synthetic new RefSCCs by providing a postorder mapping
  // for each inner SCC. We store these inside the low-link field of the nodes
  // rather than associated with SCCs because this saves a round-trip through
  // the node->SCC map and in the common case, SCCs are small. We will verify
  // that we always give the same number to every node in the SCC such that
  // these are equivalent.
  int PostOrderNumber = 0;

  // Reset all the other nodes to prepare for a DFS over them, and add them to
  // our worklist.
  SmallVector<Node *, 8> Worklist;
  for (SCC *C : SCCs) {
    for (Node &N : *C)
      N.DFSNumber = N.LowLink = 0;

    Worklist.append(C->Nodes.begin(), C->Nodes.end());
  }

  // Track the number of nodes in this RefSCC so that we can quickly recognize
  // an important special case of the edge removal not breaking the cycle of
  // this RefSCC.
  const int NumRefSCCNodes = Worklist.size();

  SmallVector<std::pair<Node *, EdgeSequence::iterator>, 4> DFSStack;
  SmallVector<Node *, 4> PendingRefSCCStack;
  do {
    assert(DFSStack.empty() &&
           "Cannot begin a new root with a non-empty DFS stack!");
    assert(PendingRefSCCStack.empty() &&
           "Cannot begin a new root with pending nodes for an SCC!");

    Node *RootN = Worklist.pop_back_val();
    // Skip any nodes we've already reached in the DFS.
    if (RootN->DFSNumber != 0) {
      assert(RootN->DFSNumber == -1 &&
             "Shouldn't have any mid-DFS root nodes!");
      continue;
    }

    RootN->DFSNumber = RootN->LowLink = 1;
    int NextDFSNumber = 2;

    DFSStack.push_back({RootN, (*RootN)->begin()});
    do {
      Node *N;
      EdgeSequence::iterator I;
      std::tie(N, I) = DFSStack.pop_back_val();
      auto E = (*N)->end();

      assert(N->DFSNumber != 0 && "We should always assign a DFS number "
                                  "before processing a node.");

      while (I != E) {
        Node &ChildN = I->getNode();
        if (ChildN.DFSNumber == 0) {
          // Mark that we should start at this child when next this node is the
          // top of the stack. We don't start at the next child to ensure this
          // child's lowlink is reflected.
          DFSStack.push_back({N, I});

          // Continue, resetting to the child node.
          ChildN.LowLink = ChildN.DFSNumber = NextDFSNumber++;
          N = &ChildN;
          I = ChildN->begin();
          E = ChildN->end();
          continue;
        }
        if (ChildN.DFSNumber == -1) {
          // If this child isn't currently in this RefSCC, no need to process
          // it.
          ++I;
          continue;
        }

        // Track the lowest link of the children, if any are still in the
        // stack.
        // Any child not on the stack will have a LowLink of -1.
        assert(ChildN.LowLink != 0 &&
               "Low-link must not be zero with a non-zero DFS number.");
        if (ChildN.LowLink >= 0 && ChildN.LowLink < N->LowLink)
          N->LowLink = ChildN.LowLink;
        ++I;
      }

      // We've finished processing N and its descendants, put it on our pending
      // stack to eventually get merged into a RefSCC.
      PendingRefSCCStack.push_back(N);

      // If this node is linked to some lower entry, continue walking up the
      // stack.
      if (N->LowLink != N->DFSNumber) {
        assert(!DFSStack.empty() &&
               "We never found a viable root for a RefSCC to pop off!");
        continue;
      }

      // Otherwise, form a new RefSCC from the top of the pending node stack.
      int RefSCCNumber = PostOrderNumber++;
      int RootDFSNumber = N->DFSNumber;

      // Find the range of the node stack by walking down until we pass the
      // root DFS number. Update the DFS numbers and low link numbers in the
      // process to avoid re-walking this list where possible.
      auto StackRI = find_if(reverse(PendingRefSCCStack), [&](Node *N) {
        if (N->DFSNumber < RootDFSNumber)
          // We've found the bottom.
          return true;

        // Update this node and keep scanning.
        N->DFSNumber = -1;
        // Save the post-order number in the lowlink field so that we can use
        // it to map SCCs into new RefSCCs after we finish the DFS.
        N->LowLink = RefSCCNumber;
        return false;
      });
      auto RefSCCNodes = make_range(StackRI.base(), PendingRefSCCStack.end());

      // If we find a cycle containing all nodes originally in this RefSCC then
      // the removal hasn't changed the structure at all. This is an important
      // special case and we can directly exit the entire routine more
      // efficiently as soon as we discover it.
      if (llvm::size(RefSCCNodes) == NumRefSCCNodes) {
        // Clear out the low link field as we won't need it.
        for (Node *N : RefSCCNodes)
          N->LowLink = -1;
        // Return the empty result immediately.
        return Result;
      }

      // We've already marked the nodes internally with the RefSCC number so
      // just clear them off the stack and continue.
      PendingRefSCCStack.erase(RefSCCNodes.begin(), PendingRefSCCStack.end());
    } while (!DFSStack.empty());

    assert(DFSStack.empty() && "Didn't flush the entire DFS stack!");
    assert(PendingRefSCCStack.empty() && "Didn't flush all pending nodes!");
  } while (!Worklist.empty());

  assert(PostOrderNumber > 1 &&
         "Should never finish the DFS when the existing RefSCC remains valid!");

  // Otherwise we create a collection of new RefSCC nodes and build
  // a radix-sort style map from postorder number to these new RefSCCs. We then
  // append SCCs to each of these RefSCCs in the order they occurred in the
  // original SCCs container.
  for (int i = 0; i < PostOrderNumber; ++i)
    Result.push_back(G->createRefSCC(*G));

  // Insert the resulting postorder sequence into the global graph postorder
  // sequence before the current RefSCC in that sequence, and then remove the
  // current one.
  //
  // FIXME: It'd be nice to change the APIs so that we returned an iterator
  // range over the global postorder sequence and generally use that sequence
  // rather than building a separate result vector here.
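  //
  // For instance (an illustrative note, not tied to a specific test case): if
  // the DFS above split this RefSCC into two pieces R0 and R1, the global
  // sequence goes from [..., X, this, Y, ...] to [..., X, R0, R1, Y, ...],
  // where X and Y stand for whatever neighbors were already present, and the
  // indices of Y and everything after it are shifted to match.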
  int Idx = G->getRefSCCIndex(*this);
  G->PostOrderRefSCCs.erase(G->PostOrderRefSCCs.begin() + Idx);
  G->PostOrderRefSCCs.insert(G->PostOrderRefSCCs.begin() + Idx, Result.begin(),
                             Result.end());
  for (int i : seq<int>(Idx, G->PostOrderRefSCCs.size()))
    G->RefSCCIndices[G->PostOrderRefSCCs[i]] = i;

  for (SCC *C : SCCs) {
    // We store the SCC number in the node's low-link field above.
    int SCCNumber = C->begin()->LowLink;
    // Clear out all of the SCC's node's low-link fields now that we're done
    // using them as side-storage.
    for (Node &N : *C) {
      assert(N.LowLink == SCCNumber &&
             "Cannot have different numbers for nodes in the same SCC!");
      N.LowLink = -1;
    }

    RefSCC &RC = *Result[SCCNumber];
    int SCCIndex = RC.SCCs.size();
    RC.SCCs.push_back(C);
    RC.SCCIndices[C] = SCCIndex;
    C->OuterRefSCC = &RC;
  }

  // Now that we've moved things into the new RefSCCs, clear out our current
  // one.
  G = nullptr;
  SCCs.clear();
  SCCIndices.clear();

#ifndef NDEBUG
  // Verify the new RefSCCs we've built.
  for (RefSCC *RC : Result)
    RC->verify();
#endif

  // Return the new list of RefSCCs.
  return Result;
}

void LazyCallGraph::RefSCC::handleTrivialEdgeInsertion(Node &SourceN,
                                                       Node &TargetN) {
  // The only trivial case that requires any graph updates is when we add a new
  // ref edge and may connect different RefSCCs along that path. This is only
  // because of the parents set. Every other part of the graph remains constant
  // after this edge insertion.
  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
  RefSCC &TargetRC = *G->lookupRefSCC(TargetN);
  if (&TargetRC == this)
    return;

#ifdef EXPENSIVE_CHECKS
  assert(TargetRC.isDescendantOf(*this) &&
         "Target must be a descendant of the Source.");
#endif
}

void LazyCallGraph::RefSCC::insertTrivialCallEdge(Node &SourceN,
                                                  Node &TargetN) {
#ifndef NDEBUG
  // Check that the RefSCC is still valid when we finish.
  auto ExitVerifier = make_scope_exit([this] { verify(); });

#ifdef EXPENSIVE_CHECKS
  // Check that we aren't breaking some invariants of the SCC graph. Note that
  // this is quadratic in the number of edges in the call graph!
  SCC &SourceC = *G->lookupSCC(SourceN);
  SCC &TargetC = *G->lookupSCC(TargetN);
  if (&SourceC != &TargetC)
    assert(SourceC.isAncestorOf(TargetC) &&
           "Call edge is not trivial in the SCC graph!");
#endif // EXPENSIVE_CHECKS
#endif // NDEBUG

  // First insert it into the source or find the existing edge.
  auto InsertResult =
      SourceN->EdgeIndexMap.insert({&TargetN, SourceN->Edges.size()});
  if (!InsertResult.second) {
    // Already an edge, just update it.
    Edge &E = SourceN->Edges[InsertResult.first->second];
    if (E.isCall())
      return; // Nothing to do!
    E.setKind(Edge::Call);
  } else {
    // Create the new edge.
    SourceN->Edges.emplace_back(TargetN, Edge::Call);
  }

  // Now that we have the edge, handle the graph fallout.
  handleTrivialEdgeInsertion(SourceN, TargetN);
}

void LazyCallGraph::RefSCC::insertTrivialRefEdge(Node &SourceN, Node &TargetN) {
#ifndef NDEBUG
  // Check that the RefSCC is still valid when we finish.
  auto ExitVerifier = make_scope_exit([this] { verify(); });

#ifdef EXPENSIVE_CHECKS
  // Check that we aren't breaking some invariants of the RefSCC graph.
  RefSCC &SourceRC = *G->lookupRefSCC(SourceN);
  RefSCC &TargetRC = *G->lookupRefSCC(TargetN);
  if (&SourceRC != &TargetRC)
    assert(SourceRC.isAncestorOf(TargetRC) &&
           "Ref edge is not trivial in the RefSCC graph!");
#endif // EXPENSIVE_CHECKS
#endif // NDEBUG

  // First insert it into the source or find the existing edge.
  auto InsertResult =
      SourceN->EdgeIndexMap.insert({&TargetN, SourceN->Edges.size()});
  if (!InsertResult.second)
    // Already an edge, we're done.
    return;

  // Create the new edge.
  SourceN->Edges.emplace_back(TargetN, Edge::Ref);

  // Now that we have the edge, handle the graph fallout.
  handleTrivialEdgeInsertion(SourceN, TargetN);
}

void LazyCallGraph::RefSCC::replaceNodeFunction(Node &N, Function &NewF) {
  Function &OldF = N.getFunction();

#ifndef NDEBUG
  // Check that the RefSCC is still valid when we finish.
  auto ExitVerifier = make_scope_exit([this] { verify(); });

  assert(G->lookupRefSCC(N) == this &&
         "Cannot replace the function of a node outside this RefSCC.");

  assert(G->NodeMap.find(&NewF) == G->NodeMap.end() &&
         "Must not have already walked the new function!");

  // It is important that this replacement not introduce graph changes so we
  // insist that the caller has already removed every use of the original
  // function and that all uses of the new function correspond to existing
  // edges in the graph. The common and expected way to use this is when
  // replacing the function itself in the IR without changing the call graph
  // shape and just updating the analysis based on that.
  assert(&OldF != &NewF && "Cannot replace a function with itself!");
  assert(OldF.use_empty() &&
         "Must have moved all uses from the old function to the new!");
#endif

  N.replaceFunction(NewF);

  // Update various call graph maps.
  G->NodeMap.erase(&OldF);
  G->NodeMap[&NewF] = &N;
}

void LazyCallGraph::insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK) {
  assert(SCCMap.empty() &&
         "This method cannot be called after SCCs have been formed!");

  return SourceN->insertEdgeInternal(TargetN, EK);
}

void LazyCallGraph::removeEdge(Node &SourceN, Node &TargetN) {
  assert(SCCMap.empty() &&
         "This method cannot be called after SCCs have been formed!");

  bool Removed = SourceN->removeEdgeInternal(TargetN);
  (void)Removed;
  assert(Removed && "Target not in the edge set for this caller?");
}

void LazyCallGraph::removeDeadFunction(Function &F) {
  // FIXME: This is unnecessarily restrictive. We should be able to remove
  // functions which recursively call themselves.
  assert(F.use_empty() &&
         "This routine should only be called on trivially dead functions!");

  // We shouldn't remove library functions as they are never really dead while
  // the call graph is in use -- every function definition refers to them.
  assert(!isLibFunction(F) &&
         "Must not remove lib functions from the call graph!");

  auto NI = NodeMap.find(&F);
  if (NI == NodeMap.end())
    // Not in the graph at all!

void LazyCallGraph::insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK) {
  assert(SCCMap.empty() &&
         "This method cannot be called after SCCs have been formed!");

  return SourceN->insertEdgeInternal(TargetN, EK);
}

void LazyCallGraph::removeEdge(Node &SourceN, Node &TargetN) {
  assert(SCCMap.empty() &&
         "This method cannot be called after SCCs have been formed!");

  bool Removed = SourceN->removeEdgeInternal(TargetN);
  (void)Removed;
  assert(Removed && "Target not in the edge set for this caller?");
}

void LazyCallGraph::removeDeadFunction(Function &F) {
  // FIXME: This is unnecessarily restrictive. We should be able to remove
  // functions which recursively call themselves.
  assert(F.use_empty() &&
         "This routine should only be called on trivially dead functions!");

  // We shouldn't remove library functions as they are never really dead while
  // the call graph is in use -- every function definition refers to them.
  assert(!isLibFunction(F) &&
         "Must not remove lib functions from the call graph!");

  auto NI = NodeMap.find(&F);
  if (NI == NodeMap.end())
    // Not in the graph at all!
    return;

  Node &N = *NI->second;
  NodeMap.erase(NI);

  // Remove this from the entry edges if present.
  EntryEdges.removeEdgeInternal(N);

  if (SCCMap.empty()) {
    // No SCCs have been formed, so removing this is fine and there is nothing
    // else necessary at this point but clearing out the node.
    N.clear();
    return;
  }

  // Cannot remove a function which has yet to be visited in the DFS walk, so
  // if we have a node at all then we must have an SCC and RefSCC.
  auto CI = SCCMap.find(&N);
  assert(CI != SCCMap.end() &&
         "Tried to remove a node without an SCC after DFS walk started!");
  SCC &C = *CI->second;
  SCCMap.erase(CI);
  RefSCC &RC = C.getOuterRefSCC();

  // This node must be the only member of its SCC as it has no callers, and
  // that SCC must be the only member of a RefSCC as it has no references.
  // Validate these properties first.
  assert(C.size() == 1 && "Dead functions must be in a singular SCC");
  assert(RC.size() == 1 && "Dead functions must be in a singular RefSCC");

  auto RCIndexI = RefSCCIndices.find(&RC);
  int RCIndex = RCIndexI->second;
  PostOrderRefSCCs.erase(PostOrderRefSCCs.begin() + RCIndex);
  RefSCCIndices.erase(RCIndexI);
  for (int i = RCIndex, Size = PostOrderRefSCCs.size(); i < Size; ++i)
    RefSCCIndices[PostOrderRefSCCs[i]] = i;

  // Finally clear out all the data structures from the node down through the
  // components.
  N.clear();
  N.G = nullptr;
  N.F = nullptr;
  C.clear();
  RC.clear();
  RC.G = nullptr;

  // Nothing to delete as all the objects are allocated in stable bump pointer
  // allocators.
}
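
// Illustrative sketch (hypothetical `CG` and `DeadF`): a pass that has proven
// a non-library function trivially dead might clear it out of the graph
// before erasing the IR:
//
//   if (DeadF.use_empty() && !CG.isLibFunction(DeadF)) {
//     CG.removeDeadFunction(DeadF);
//     DeadF.eraseFromParent();
//   }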

LazyCallGraph::Node &LazyCallGraph::insertInto(Function &F, Node *&MappedN) {
  return *new (MappedN = BPA.Allocate()) Node(*this, F);
}

void LazyCallGraph::updateGraphPtrs() {
  // Walk the node map to update the nodes' graph pointers. While this
  // iterates in an unstable order, the order has no effect so the result
  // remains correct.
  for (auto &FunctionNodePair : NodeMap)
    FunctionNodePair.second->G = this;

  for (auto *RC : PostOrderRefSCCs)
    RC->G = this;
}

template <typename RootsT, typename GetBeginT, typename GetEndT,
          typename GetNodeT, typename FormSCCCallbackT>
void LazyCallGraph::buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
                                     GetEndT &&GetEnd, GetNodeT &&GetNode,
                                     FormSCCCallbackT &&FormSCC) {
  using EdgeItT = decltype(GetBegin(std::declval<Node &>()));

  SmallVector<std::pair<Node *, EdgeItT>, 16> DFSStack;
  SmallVector<Node *, 16> PendingSCCStack;

  // Scan down the stack and DFS across the edges.
  for (Node *RootN : Roots) {
    assert(DFSStack.empty() &&
           "Cannot begin a new root with a non-empty DFS stack!");
    assert(PendingSCCStack.empty() &&
           "Cannot begin a new root with pending nodes for an SCC!");

    // Skip any nodes we've already reached in the DFS.
    if (RootN->DFSNumber != 0) {
      assert(RootN->DFSNumber == -1 &&
             "Shouldn't have any mid-DFS root nodes!");
      continue;
    }

    RootN->DFSNumber = RootN->LowLink = 1;
    int NextDFSNumber = 2;

    DFSStack.push_back({RootN, GetBegin(*RootN)});
    do {
      Node *N;
      EdgeItT I;
      std::tie(N, I) = DFSStack.pop_back_val();
      auto E = GetEnd(*N);
      while (I != E) {
        Node &ChildN = GetNode(I);
        if (ChildN.DFSNumber == 0) {
          // We haven't yet visited this child, so descend, pushing the current
          // node onto the stack.
          DFSStack.push_back({N, I});

          ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
          N = &ChildN;
          I = GetBegin(*N);
          E = GetEnd(*N);
          continue;
        }

        // If the child has already been added to some child component, it
        // couldn't impact the low-link of this parent because it isn't
        // connected, and thus its low-link isn't relevant, so skip it.
        if (ChildN.DFSNumber == -1) {
          ++I;
          continue;
        }

        // Track the lowest linked child as the lowest link for this node.
        assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
        if (ChildN.LowLink < N->LowLink)
          N->LowLink = ChildN.LowLink;

        // Move to the next edge.
        ++I;
      }

      // We've finished processing N and its descendants, put it on our pending
      // SCC stack to eventually get merged into an SCC of nodes.
      PendingSCCStack.push_back(N);

      // If this node is linked to some lower entry, continue walking up the
      // stack.
      if (N->LowLink != N->DFSNumber)
        continue;

      // Otherwise, we've completed an SCC. Append it to our post order list of
      // SCCs.
      int RootDFSNumber = N->DFSNumber;
      // Find the range of the node stack by walking down until we pass the
      // root DFS number.
      auto SCCNodes = make_range(
          PendingSCCStack.rbegin(),
          find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
            return N->DFSNumber < RootDFSNumber;
          }));
      // Form a new SCC out of these nodes and then clear them off our pending
      // stack.
      FormSCC(SCCNodes);
      PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
    } while (!DFSStack.empty());
  }
}
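
// To illustrate the walk above on a small, hypothetical graph: with edges
// A -> B, B -> C, C -> A, and A -> D (where D has no out-edges), a DFS rooted
// at A assigns DFS numbers A=1, B=2, C=3, D=4. C's back edge to A lowers C's
// low-link to 1, and that value propagates to B as the walk unwinds, so A, B,
// and C all stay on the pending stack. D finishes with LowLink == DFSNumber
// == 4 and is formed into a singleton SCC immediately; once the walk returns
// to A (LowLink == DFSNumber == 1), FormSCC runs on {C, B, A}. The resulting
// bottom-up post order is {D}, then {A, B, C}.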

/// Build the internal SCCs for a RefSCC from a sequence of nodes.
///
/// Appends the SCCs to the provided vector and updates the map with their
/// indices. Both the vector and map must be empty when passed into this
/// routine.
void LazyCallGraph::buildSCCs(RefSCC &RC, node_stack_range Nodes) {
  assert(RC.SCCs.empty() && "Already built SCCs!");
  assert(RC.SCCIndices.empty() && "Already mapped SCC indices!");

  for (Node *N : Nodes) {
    assert(N->LowLink >= (*Nodes.begin())->LowLink &&
           "We cannot have a low link in an SCC lower than its root on the "
           "stack!");

    // This node will go into the next RefSCC; clear out its DFS and low link
    // numbers as we scan.
    N->DFSNumber = N->LowLink = 0;
  }

  // Each RefSCC contains a DAG of the call SCCs. To build these, we do
  // a direct walk of the call edges using Tarjan's algorithm. We reuse the
  // internal storage as we won't need it for the outer graph's DFS any longer.
  buildGenericSCCs(
      Nodes, [](Node &N) { return N->call_begin(); },
      [](Node &N) { return N->call_end(); },
      [](EdgeSequence::call_iterator I) -> Node & { return I->getNode(); },
      [this, &RC](node_stack_range Nodes) {
        RC.SCCs.push_back(createSCC(RC, Nodes));
        for (Node &N : *RC.SCCs.back()) {
          N.DFSNumber = N.LowLink = -1;
          SCCMap[&N] = RC.SCCs.back();
        }
      });

  // Wire up the SCC indices.
  for (int i = 0, Size = RC.SCCs.size(); i < Size; ++i)
    RC.SCCIndices[RC.SCCs[i]] = i;
}

void LazyCallGraph::buildRefSCCs() {
  if (EntryEdges.empty() || !PostOrderRefSCCs.empty())
    // RefSCCs are either non-existent or already built!
    return;

  assert(RefSCCIndices.empty() && "Already mapped RefSCC indices!");

  SmallVector<Node *, 16> Roots;
  for (Edge &E : *this)
    Roots.push_back(&E.getNode());

  // The roots will be popped off a stack, so use reverse to get a less
  // surprising order. This doesn't change any of the semantics anywhere.
  std::reverse(Roots.begin(), Roots.end());

  buildGenericSCCs(
      Roots,
      [](Node &N) {
        // We need to populate each node as we begin to walk its edges.
        N.populate();
        return N->begin();
      },
      [](Node &N) { return N->end(); },
      [](EdgeSequence::iterator I) -> Node & { return I->getNode(); },
      [this](node_stack_range Nodes) {
        RefSCC *NewRC = createRefSCC(*this);
        buildSCCs(*NewRC, Nodes);

        // Push the new RefSCC into the postorder list and remember its
        // position in the index map.
        bool Inserted =
            RefSCCIndices.insert({NewRC, PostOrderRefSCCs.size()}).second;
        (void)Inserted;
        assert(Inserted && "Cannot already have this RefSCC in the index map!");
        PostOrderRefSCCs.push_back(NewRC);
#ifndef NDEBUG
        NewRC->verify();
#endif
      });
}
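
// Illustrative sketch (hypothetical `CG` and `visit`): once built, the RefSCC
// DAG is typically consumed bottom-up via the postorder sequence, with each
// RefSCC yielding its internal call SCCs in postorder as well:
//
//   CG.buildRefSCCs();
//   for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
//     for (LazyCallGraph::SCC &C : RC)
//       for (LazyCallGraph::Node &N : C)
//         visit(N.getFunction());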
"call" : "ref ") << " -> " 1748 << E.getFunction().getName() << "\n"; 1749 1750 OS << "\n"; 1751 } 1752 1753 static void printSCC(raw_ostream &OS, LazyCallGraph::SCC &C) { 1754 ptrdiff_t Size = size(C); 1755 OS << " SCC with " << Size << " functions:\n"; 1756 1757 for (LazyCallGraph::Node &N : C) 1758 OS << " " << N.getFunction().getName() << "\n"; 1759 } 1760 1761 static void printRefSCC(raw_ostream &OS, LazyCallGraph::RefSCC &C) { 1762 ptrdiff_t Size = size(C); 1763 OS << " RefSCC with " << Size << " call SCCs:\n"; 1764 1765 for (LazyCallGraph::SCC &InnerC : C) 1766 printSCC(OS, InnerC); 1767 1768 OS << "\n"; 1769 } 1770 1771 PreservedAnalyses LazyCallGraphPrinterPass::run(Module &M, 1772 ModuleAnalysisManager &AM) { 1773 LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M); 1774 1775 OS << "Printing the call graph for module: " << M.getModuleIdentifier() 1776 << "\n\n"; 1777 1778 for (Function &F : M) 1779 printNode(OS, G.get(F)); 1780 1781 G.buildRefSCCs(); 1782 for (LazyCallGraph::RefSCC &C : G.postorder_ref_sccs()) 1783 printRefSCC(OS, C); 1784 1785 return PreservedAnalyses::all(); 1786 } 1787 1788 LazyCallGraphDOTPrinterPass::LazyCallGraphDOTPrinterPass(raw_ostream &OS) 1789 : OS(OS) {} 1790 1791 static void printNodeDOT(raw_ostream &OS, LazyCallGraph::Node &N) { 1792 std::string Name = "\"" + DOT::EscapeString(N.getFunction().getName()) + "\""; 1793 1794 for (LazyCallGraph::Edge &E : N.populate()) { 1795 OS << " " << Name << " -> \"" 1796 << DOT::EscapeString(E.getFunction().getName()) << "\""; 1797 if (!E.isCall()) // It is a ref edge. 1798 OS << " [style=dashed,label=\"ref\"]"; 1799 OS << ";\n"; 1800 } 1801 1802 OS << "\n"; 1803 } 1804 1805 PreservedAnalyses LazyCallGraphDOTPrinterPass::run(Module &M, 1806 ModuleAnalysisManager &AM) { 1807 LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M); 1808 1809 OS << "digraph \"" << DOT::EscapeString(M.getModuleIdentifier()) << "\" {\n"; 1810 1811 for (Function &F : M) 1812 printNodeDOT(OS, G.get(F)); 1813 1814 OS << "}\n"; 1815 1816 return PreservedAnalyses::all(); 1817 } 1818