xref: /llvm-project/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp (revision 7574042e2a5887e09d740adf9f4ed605f8f00ec5)
1 //===- SparseAnalysis.cpp - Sparse data-flow analysis ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Analysis/DataFlow/SparseAnalysis.h"
10 #include "mlir/Analysis/DataFlow/DeadCodeAnalysis.h"
11 #include "mlir/Analysis/DataFlowFramework.h"
12 #include "mlir/IR/Attributes.h"
13 #include "mlir/IR/Operation.h"
14 #include "mlir/IR/Region.h"
15 #include "mlir/IR/SymbolTable.h"
16 #include "mlir/IR/Value.h"
17 #include "mlir/IR/ValueRange.h"
18 #include "mlir/Interfaces/CallInterfaces.h"
19 #include "mlir/Interfaces/ControlFlowInterfaces.h"
20 #include "mlir/Support/LLVM.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/Support/Casting.h"
23 #include <cassert>
24 #include <optional>
25 
26 using namespace mlir;
27 using namespace mlir::dataflow;
28 
29 //===----------------------------------------------------------------------===//
30 // AbstractSparseLattice
31 //===----------------------------------------------------------------------===//
32 
33 void AbstractSparseLattice::onUpdate(DataFlowSolver *solver) const {
34   AnalysisState::onUpdate(solver);
35 
36   // Push all users of the value to the queue.
37   for (Operation *user : anchor.get<Value>().getUsers())
38     for (DataFlowAnalysis *analysis : useDefSubscribers)
39       solver->enqueue({user, analysis});
40 }
41 
42 //===----------------------------------------------------------------------===//
43 // AbstractSparseForwardDataFlowAnalysis
44 //===----------------------------------------------------------------------===//
45 
46 AbstractSparseForwardDataFlowAnalysis::AbstractSparseForwardDataFlowAnalysis(
47     DataFlowSolver &solver)
48     : DataFlowAnalysis(solver) {
49   registerAnchorKind<CFGEdge>();
50 }
51 
52 LogicalResult
53 AbstractSparseForwardDataFlowAnalysis::initialize(Operation *top) {
54   // Mark the entry block arguments as having reached their pessimistic
55   // fixpoints.
56   for (Region &region : top->getRegions()) {
57     if (region.empty())
58       continue;
59     for (Value argument : region.front().getArguments())
60       setToEntryState(getLatticeElement(argument));
61   }
62 
63   return initializeRecursively(top);
64 }
65 
66 LogicalResult
67 AbstractSparseForwardDataFlowAnalysis::initializeRecursively(Operation *op) {
68   // Initialize the analysis by visiting every owner of an SSA value (all
69   // operations and blocks).
70   if (failed(visitOperation(op)))
71     return failure();
72 
73   for (Region &region : op->getRegions()) {
74     for (Block &block : region) {
75       getOrCreate<Executable>(&block)->blockContentSubscribe(this);
76       visitBlock(&block);
77       for (Operation &op : block)
78         if (failed(initializeRecursively(&op)))
79           return failure();
80     }
81   }
82 
83   return success();
84 }
85 
86 LogicalResult AbstractSparseForwardDataFlowAnalysis::visit(ProgramPoint point) {
87   if (Operation *op = llvm::dyn_cast_if_present<Operation *>(point))
88     return visitOperation(op);
89   visitBlock(point.get<Block *>());
90   return success();
91 }
92 
/// Transfer-function driver for a single operation: derives the lattices of
/// the op's results from its operands. Region branch ops and call ops are
/// handled via control-flow/callgraph predecessors instead of the generic
/// transfer function.
LogicalResult
AbstractSparseForwardDataFlowAnalysis::visitOperation(Operation *op) {
  // Exit early on operations with no results.
  if (op->getNumResults() == 0)
    return success();

  // If the containing block is not executable, bail out.
  if (!getOrCreate<Executable>(op->getBlock())->isLive())
    return success();

  // Get the result lattices.
  SmallVector<AbstractSparseLattice *> resultLattices;
  resultLattices.reserve(op->getNumResults());
  for (Value result : op->getResults()) {
    AbstractSparseLattice *resultLattice = getLatticeElement(result);
    resultLattices.push_back(resultLattice);
  }

  // The results of a region branch operation are determined by control-flow.
  if (auto branch = dyn_cast<RegionBranchOpInterface>(op)) {
    visitRegionSuccessors({branch}, branch,
                          /*successor=*/RegionBranchPoint::parent(),
                          resultLattices);
    return success();
  }

  // Grab the lattice elements of the operands. Subscribe to each operand
  // lattice so this operation is re-visited when any of them changes.
  SmallVector<const AbstractSparseLattice *> operandLattices;
  operandLattices.reserve(op->getNumOperands());
  for (Value operand : op->getOperands()) {
    AbstractSparseLattice *operandLattice = getLatticeElement(operand);
    operandLattice->useDefSubscribe(this);
    operandLattices.push_back(operandLattice);
  }

  if (auto call = dyn_cast<CallOpInterface>(op)) {
    // If the call operation is to an external function, attempt to infer the
    // results from the call arguments.
    auto callable =
        dyn_cast_if_present<CallableOpInterface>(call.resolveCallable());
    // A callable without a body, or a non-interprocedural configuration, is
    // treated as an external call.
    if (!getSolverConfig().isInterprocedural() ||
        (callable && !callable.getCallableRegion())) {
      visitExternalCallImpl(call, operandLattices, resultLattices);
      return success();
    }

    // Otherwise, the results of a call operation are determined by the
    // callgraph.
    const auto *predecessors = getOrCreateFor<PredecessorState>(op, call);
    // If not all return sites are known, then conservatively assume we can't
    // reason about the data-flow.
    if (!predecessors->allPredecessorsKnown()) {
      setAllToEntryStates(resultLattices);
      return success();
    }
    // Join the call results with the operands of each known return site.
    for (Operation *predecessor : predecessors->getKnownPredecessors())
      for (auto &&[operand, resLattice] :
           llvm::zip(predecessor->getOperands(), resultLattices))
        join(resLattice, *getLatticeElementFor(op, operand));
    return success();
  }

  // Invoke the operation transfer function.
  return visitOperationImpl(op, operandLattices, resultLattices);
}
158 
/// Computes the lattices of a block's arguments. Entry-block arguments are
/// derived from callsites (callable regions) or region control-flow; other
/// block arguments are joined across the live CFG edges from predecessors.
void AbstractSparseForwardDataFlowAnalysis::visitBlock(Block *block) {
  // Exit early on blocks with no arguments.
  if (block->getNumArguments() == 0)
    return;

  // If the block is not executable, bail out.
  if (!getOrCreate<Executable>(block)->isLive())
    return;

  // Get the argument lattices.
  SmallVector<AbstractSparseLattice *> argLattices;
  argLattices.reserve(block->getNumArguments());
  for (BlockArgument argument : block->getArguments()) {
    AbstractSparseLattice *argLattice = getLatticeElement(argument);
    argLattices.push_back(argLattice);
  }

  // The argument lattices of entry blocks are set by region control-flow or the
  // callgraph.
  if (block->isEntryBlock()) {
    // Check if this block is the entry block of a callable region.
    auto callable = dyn_cast<CallableOpInterface>(block->getParentOp());
    if (callable && callable.getCallableRegion() == block->getParent()) {
      const auto *callsites = getOrCreateFor<PredecessorState>(block, callable);
      // If not all callsites are known, conservatively mark all lattices as
      // having reached their pessimistic fixpoints.
      if (!callsites->allPredecessorsKnown() ||
          !getSolverConfig().isInterprocedural()) {
        return setAllToEntryStates(argLattices);
      }
      // Join each argument lattice with the corresponding call operand from
      // every known callsite.
      for (Operation *callsite : callsites->getKnownPredecessors()) {
        auto call = cast<CallOpInterface>(callsite);
        for (auto it : llvm::zip(call.getArgOperands(), argLattices))
          join(std::get<1>(it), *getLatticeElementFor(block, std::get<0>(it)));
      }
      return;
    }

    // Check if the lattices can be determined from region control flow.
    if (auto branch = dyn_cast<RegionBranchOpInterface>(block->getParentOp())) {
      return visitRegionSuccessors(block, branch, block->getParent(),
                                   argLattices);
    }

    // Otherwise, we can't reason about the data-flow.
    return visitNonControlFlowArgumentsImpl(block->getParentOp(),
                                            RegionSuccessor(block->getParent()),
                                            argLattices, /*firstIndex=*/0);
  }

  // Iterate over the predecessors of the non-entry block.
  for (Block::pred_iterator it = block->pred_begin(), e = block->pred_end();
       it != e; ++it) {
    Block *predecessor = *it;

    // If the edge from the predecessor block to the current block is not live,
    // bail out. Subscribe so the block is re-visited when edge liveness
    // changes.
    auto *edgeExecutable =
        getOrCreate<Executable>(getLatticeAnchor<CFGEdge>(predecessor, block));
    edgeExecutable->blockContentSubscribe(this);
    if (!edgeExecutable->isLive())
      continue;

    // Check if we can reason about the data-flow from the predecessor.
    if (auto branch =
            dyn_cast<BranchOpInterface>(predecessor->getTerminator())) {
      SuccessorOperands operands =
          branch.getSuccessorOperands(it.getSuccessorIndex());
      for (auto [idx, lattice] : llvm::enumerate(argLattices)) {
        if (Value operand = operands[idx]) {
          join(lattice, *getLatticeElementFor(block, operand));
        } else {
          // Conservatively consider internally produced arguments as entry
          // points.
          setAllToEntryStates(lattice);
        }
      }
    } else {
      // Unknown terminator: we can't relate predecessor operands to block
      // arguments, so pessimize everything.
      return setAllToEntryStates(argLattices);
    }
  }
}
241 
/// Joins the lattices of region-successor inputs (`lattices`, either op
/// results or entry-block arguments) with the operands forwarded to them by
/// each known control-flow predecessor of `successor`.
void AbstractSparseForwardDataFlowAnalysis::visitRegionSuccessors(
    ProgramPoint point, RegionBranchOpInterface branch,
    RegionBranchPoint successor, ArrayRef<AbstractSparseLattice *> lattices) {
  const auto *predecessors = getOrCreateFor<PredecessorState>(point, point);
  assert(predecessors->allPredecessorsKnown() &&
         "unexpected unresolved region successors");

  for (Operation *op : predecessors->getKnownPredecessors()) {
    // Get the incoming successor operands.
    std::optional<OperandRange> operands;

    // Check if the predecessor is the parent op.
    if (op == branch) {
      operands = branch.getEntrySuccessorOperands(successor);
      // Otherwise, try to deduce the operands from a region return-like op.
    } else if (auto regionTerminator =
                   dyn_cast<RegionBranchTerminatorOpInterface>(op)) {
      operands = regionTerminator.getSuccessorOperands(successor);
    }

    if (!operands) {
      // We can't reason about the data-flow.
      return setAllToEntryStates(lattices);
    }

    ValueRange inputs = predecessors->getSuccessorInputs(op);
    assert(inputs.size() == operands->size() &&
           "expected the same number of successor inputs as operands");

    // When only a suffix of `lattices` corresponds to forwarded inputs, the
    // leading (non-control-flow) lattices are handled by the
    // `visitNonControlFlowArguments` hook, and `firstIndex` marks where the
    // forwarded suffix starts.
    unsigned firstIndex = 0;
    if (inputs.size() != lattices.size()) {
      if (llvm::dyn_cast_if_present<Operation *>(point)) {
        // The point is an operation: inputs are op results.
        if (!inputs.empty())
          firstIndex = cast<OpResult>(inputs.front()).getResultNumber();
        visitNonControlFlowArgumentsImpl(
            branch,
            RegionSuccessor(
                branch->getResults().slice(firstIndex, inputs.size())),
            lattices, firstIndex);
      } else {
        // The point is a block: inputs are block arguments.
        if (!inputs.empty())
          firstIndex = cast<BlockArgument>(inputs.front()).getArgNumber();
        Region *region = point.get<Block *>()->getParent();
        visitNonControlFlowArgumentsImpl(
            branch,
            RegionSuccessor(region, region->getArguments().slice(
                                        firstIndex, inputs.size())),
            lattices, firstIndex);
      }
    }

    // Join the forwarded operands into the corresponding successor lattices.
    for (auto it : llvm::zip(*operands, lattices.drop_front(firstIndex)))
      join(std::get<1>(it), *getLatticeElementFor(point, std::get<0>(it)));
  }
}
297 
298 const AbstractSparseLattice *
299 AbstractSparseForwardDataFlowAnalysis::getLatticeElementFor(ProgramPoint point,
300                                                             Value value) {
301   AbstractSparseLattice *state = getLatticeElement(value);
302   addDependency(state, point);
303   return state;
304 }
305 
306 void AbstractSparseForwardDataFlowAnalysis::setAllToEntryStates(
307     ArrayRef<AbstractSparseLattice *> lattices) {
308   for (AbstractSparseLattice *lattice : lattices)
309     setToEntryState(lattice);
310 }
311 
312 void AbstractSparseForwardDataFlowAnalysis::join(
313     AbstractSparseLattice *lhs, const AbstractSparseLattice &rhs) {
314   propagateIfChanged(lhs, lhs->join(rhs));
315 }
316 
317 //===----------------------------------------------------------------------===//
318 // AbstractSparseBackwardDataFlowAnalysis
319 //===----------------------------------------------------------------------===//
320 
321 AbstractSparseBackwardDataFlowAnalysis::AbstractSparseBackwardDataFlowAnalysis(
322     DataFlowSolver &solver, SymbolTableCollection &symbolTable)
323     : DataFlowAnalysis(solver), symbolTable(symbolTable) {
324   registerAnchorKind<CFGEdge>();
325 }
326 
327 LogicalResult
328 AbstractSparseBackwardDataFlowAnalysis::initialize(Operation *top) {
329   return initializeRecursively(top);
330 }
331 
332 LogicalResult
333 AbstractSparseBackwardDataFlowAnalysis::initializeRecursively(Operation *op) {
334   if (failed(visitOperation(op)))
335     return failure();
336 
337   for (Region &region : op->getRegions()) {
338     for (Block &block : region) {
339       getOrCreate<Executable>(&block)->blockContentSubscribe(this);
340       // Initialize ops in reverse order, so we can do as much initial
341       // propagation as possible without having to go through the
342       // solver queue.
343       for (auto it = block.rbegin(); it != block.rend(); it++)
344         if (failed(initializeRecursively(&*it)))
345           return failure();
346     }
347   }
348   return success();
349 }
350 
351 LogicalResult
352 AbstractSparseBackwardDataFlowAnalysis::visit(ProgramPoint point) {
353   if (Operation *op = llvm::dyn_cast_if_present<Operation *>(point))
354     return visitOperation(op);
355   // For backward dataflow, we don't have to do any work for the blocks
356   // themselves. CFG edges between blocks are processed by the BranchOp
357   // logic in `visitOperation`, and entry blocks for functions are tied
358   // to the CallOp arguments by visitOperation.
359   return success();
360 }
361 
362 SmallVector<AbstractSparseLattice *>
363 AbstractSparseBackwardDataFlowAnalysis::getLatticeElements(ValueRange values) {
364   SmallVector<AbstractSparseLattice *> resultLattices;
365   resultLattices.reserve(values.size());
366   for (Value result : values) {
367     AbstractSparseLattice *resultLattice = getLatticeElement(result);
368     resultLattices.push_back(resultLattice);
369   }
370   return resultLattices;
371 }
372 
373 SmallVector<const AbstractSparseLattice *>
374 AbstractSparseBackwardDataFlowAnalysis::getLatticeElementsFor(
375     ProgramPoint point, ValueRange values) {
376   SmallVector<const AbstractSparseLattice *> resultLattices;
377   resultLattices.reserve(values.size());
378   for (Value result : values) {
379     const AbstractSparseLattice *resultLattice =
380         getLatticeElementFor(point, result);
381     resultLattices.push_back(resultLattice);
382   }
383   return resultLattices;
384 }
385 
386 static MutableArrayRef<OpOperand> operandsToOpOperands(OperandRange &operands) {
387   return MutableArrayRef<OpOperand>(operands.getBase(), operands.size());
388 }
389 
/// Backward transfer-function driver for a single operation: propagates
/// information from the op's results (and from any control-flow/callgraph
/// successors its operands feed) back into its operand lattices.
LogicalResult
AbstractSparseBackwardDataFlowAnalysis::visitOperation(Operation *op) {
  // If we're in a dead block, bail out.
  if (!getOrCreate<Executable>(op->getBlock())->isLive())
    return success();

  SmallVector<AbstractSparseLattice *> operandLattices =
      getLatticeElements(op->getOperands());
  SmallVector<const AbstractSparseLattice *> resultLattices =
      getLatticeElementsFor(op, op->getResults());

  // Block arguments of region branch operations flow back into the operands
  // of the parent op
  if (auto branch = dyn_cast<RegionBranchOpInterface>(op)) {
    visitRegionSuccessors(branch, operandLattices);
    return success();
  }

  if (auto branch = dyn_cast<BranchOpInterface>(op)) {
    // Block arguments of successor blocks flow back into our operands.

    // We remember all operands not forwarded to any block in a BitVector.
    // We can't just cut out a range here, since the non-forwarded ops might
    // be non-contiguous (if there's more than one successor).
    BitVector unaccounted(op->getNumOperands(), true);

    for (auto [index, block] : llvm::enumerate(op->getSuccessors())) {
      SuccessorOperands successorOperands = branch.getSuccessorOperands(index);
      OperandRange forwarded = successorOperands.getForwardedOperands();
      if (!forwarded.empty()) {
        MutableArrayRef<OpOperand> operands = op->getOpOperands().slice(
            forwarded.getBeginOperandIndex(), forwarded.size());
        for (OpOperand &operand : operands) {
          unaccounted.reset(operand.getOperandNumber());
          // Meet the operand with the successor block argument it is
          // forwarded to, if one can be identified.
          if (std::optional<BlockArgument> blockArg =
                  detail::getBranchSuccessorArgument(
                      successorOperands, operand.getOperandNumber(), block)) {
            meet(getLatticeElement(operand.get()),
                 *getLatticeElementFor(op, *blockArg));
          }
        }
      }
    }
    // Operands not forwarded to successor blocks are typically parameters
    // of the branch operation itself (for example the boolean for if/else).
    for (int index : unaccounted.set_bits()) {
      OpOperand &operand = op->getOpOperand(index);
      visitBranchOperand(operand);
    }
    return success();
  }

  // For function calls, connect the arguments of the entry blocks to the
  // operands of the call op that are forwarded to these arguments.
  if (auto call = dyn_cast<CallOpInterface>(op)) {
    Operation *callableOp = call.resolveCallable(&symbolTable);
    if (auto callable = dyn_cast_or_null<CallableOpInterface>(callableOp)) {
      // Not all operands of a call op forward to arguments. Such operands are
      // stored in `unaccounted`.
      BitVector unaccounted(op->getNumOperands(), true);

      // If the call invokes an external function (or a function treated as
      // external due to config), defer to the corresponding extension hook.
      // By default, it just does `visitCallOperand` for all operands.
      OperandRange argOperands = call.getArgOperands();
      MutableArrayRef<OpOperand> argOpOperands =
          operandsToOpOperands(argOperands);
      Region *region = callable.getCallableRegion();
      if (!region || region->empty() ||
          !getSolverConfig().isInterprocedural()) {
        visitExternalCallImpl(call, operandLattices, resultLattices);
        return success();
      }

      // Otherwise, propagate information from the entry point of the function
      // back to operands whenever possible.
      Block &block = region->front();
      for (auto [blockArg, argOpOperand] :
           llvm::zip(block.getArguments(), argOpOperands)) {
        meet(getLatticeElement(argOpOperand.get()),
             *getLatticeElementFor(op, blockArg));
        unaccounted.reset(argOpOperand.getOperandNumber());
      }

      // Handle the operands of the call op that aren't forwarded to any
      // arguments.
      for (int index : unaccounted.set_bits()) {
        OpOperand &opOperand = op->getOpOperand(index);
        visitCallOperand(opOperand);
      }
      return success();
    }
  }

  // When the region of an op implementing `RegionBranchOpInterface` has a
  // terminator implementing `RegionBranchTerminatorOpInterface` or a
  // return-like terminator, the region's successors' arguments flow back into
  // the "successor operands" of this terminator.
  //
  // A successor operand with respect to an op implementing
  // `RegionBranchOpInterface` is an operand that is forwarded to a region
  // successor's input. There are two types of successor operands: the operands
  // of this op itself and the operands of the terminators of the regions of
  // this op.
  if (auto terminator = dyn_cast<RegionBranchTerminatorOpInterface>(op)) {
    if (auto branch = dyn_cast<RegionBranchOpInterface>(op->getParentOp())) {
      visitRegionSuccessorsFromTerminator(terminator, branch);
      return success();
    }
  }

  if (op->hasTrait<OpTrait::ReturnLike>()) {
    // Going backwards, the operands of the return are derived from the
    // results of all CallOps calling this CallableOp.
    if (auto callable = dyn_cast<CallableOpInterface>(op->getParentOp())) {
      const PredecessorState *callsites =
          getOrCreateFor<PredecessorState>(op, callable);
      if (callsites->allPredecessorsKnown()) {
        for (Operation *call : callsites->getKnownPredecessors()) {
          SmallVector<const AbstractSparseLattice *> callResultLattices =
              getLatticeElementsFor(op, call->getResults());
          // NOTE: the structured binding `op` below shadows the function's
          // `op` parameter; it names an operand lattice, not an operation.
          for (auto [op, result] :
               llvm::zip(operandLattices, callResultLattices))
            meet(op, *result);
        }
      } else {
        // If we don't know all the callers, we can't know where the
        // returned values go. Note that, in particular, this will trigger
        // for the return ops of any public functions.
        setAllToExitStates(operandLattices);
      }
      return success();
    }
  }

  // Fall back to the generic transfer function for everything else.
  return visitOperationImpl(op, operandLattices, resultLattices);
}
527 
528 void AbstractSparseBackwardDataFlowAnalysis::visitRegionSuccessors(
529     RegionBranchOpInterface branch,
530     ArrayRef<AbstractSparseLattice *> operandLattices) {
531   Operation *op = branch.getOperation();
532   SmallVector<RegionSuccessor> successors;
533   SmallVector<Attribute> operands(op->getNumOperands(), nullptr);
534   branch.getEntrySuccessorRegions(operands, successors);
535 
536   // All operands not forwarded to any successor. This set can be non-contiguous
537   // in the presence of multiple successors.
538   BitVector unaccounted(op->getNumOperands(), true);
539 
540   for (RegionSuccessor &successor : successors) {
541     OperandRange operands = branch.getEntrySuccessorOperands(successor);
542     MutableArrayRef<OpOperand> opoperands = operandsToOpOperands(operands);
543     ValueRange inputs = successor.getSuccessorInputs();
544     for (auto [operand, input] : llvm::zip(opoperands, inputs)) {
545       meet(getLatticeElement(operand.get()), *getLatticeElementFor(op, input));
546       unaccounted.reset(operand.getOperandNumber());
547     }
548   }
549   // All operands not forwarded to regions are typically parameters of the
550   // branch operation itself (for example the boolean for if/else).
551   for (int index : unaccounted.set_bits()) {
552     visitBranchOperand(op->getOpOperand(index));
553   }
554 }
555 
/// Propagates region-successor inputs back into the successor operands of a
/// region terminator: each operand that `terminator` forwards to a successor
/// input meets that input's lattice.
void AbstractSparseBackwardDataFlowAnalysis::
    visitRegionSuccessorsFromTerminator(
        RegionBranchTerminatorOpInterface terminator,
        RegionBranchOpInterface branch) {
  assert(isa<RegionBranchTerminatorOpInterface>(terminator) &&
         "expected a `RegionBranchTerminatorOpInterface` op");
  assert(terminator->getParentOp() == branch.getOperation() &&
         "expected `branch` to be the parent op of `terminator`");

  // Query the successor regions, passing null constant operands.
  SmallVector<Attribute> operandAttributes(terminator->getNumOperands(),
                                           nullptr);
  SmallVector<RegionSuccessor> successors;
  terminator.getSuccessorRegions(operandAttributes, successors);
  // All operands not forwarded to any successor. This set can be
  // non-contiguous in the presence of multiple successors.
  BitVector unaccounted(terminator->getNumOperands(), true);

  for (const RegionSuccessor &successor : successors) {
    ValueRange inputs = successor.getSuccessorInputs();
    OperandRange operands = terminator.getSuccessorOperands(successor);
    MutableArrayRef<OpOperand> opOperands = operandsToOpOperands(operands);
    for (auto [opOperand, input] : llvm::zip(opOperands, inputs)) {
      meet(getLatticeElement(opOperand.get()),
           *getLatticeElementFor(terminator, input));
      // The structured binding is const-qualified here, so cast it away to
      // query the operand number.
      unaccounted.reset(const_cast<OpOperand &>(opOperand).getOperandNumber());
    }
  }
  // Visit operands of the branch op not forwarded to the next region.
  // (Like e.g. the boolean of `scf.conditional`)
  for (int index : unaccounted.set_bits()) {
    visitBranchOperand(terminator->getOpOperand(index));
  }
}
589 
590 const AbstractSparseLattice *
591 AbstractSparseBackwardDataFlowAnalysis::getLatticeElementFor(ProgramPoint point,
592                                                              Value value) {
593   AbstractSparseLattice *state = getLatticeElement(value);
594   addDependency(state, point);
595   return state;
596 }
597 
598 void AbstractSparseBackwardDataFlowAnalysis::setAllToExitStates(
599     ArrayRef<AbstractSparseLattice *> lattices) {
600   for (AbstractSparseLattice *lattice : lattices)
601     setToExitState(lattice);
602 }
603 
604 void AbstractSparseBackwardDataFlowAnalysis::meet(
605     AbstractSparseLattice *lhs, const AbstractSparseLattice &rhs) {
606   propagateIfChanged(lhs, lhs->meet(rhs));
607 }
608