//===- SparseAnalysis.cpp - Sparse data-flow analysis ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Analysis/DataFlow/SparseAnalysis.h"
#include "mlir/Analysis/DataFlow/DeadCodeAnalysis.h"
#include "mlir/Analysis/DataFlowFramework.h"
#include "mlir/Interfaces/CallInterfaces.h"

using namespace mlir;
using namespace mlir::dataflow;

//===----------------------------------------------------------------------===//
// AbstractSparseLattice
//===----------------------------------------------------------------------===//

void AbstractSparseLattice::onUpdate(DataFlowSolver *solver) const {
  AnalysisState::onUpdate(solver);

  // Push all users of the value to the queue.
  for (Operation *user : point.get<Value>().getUsers())
    for (DataFlowAnalysis *analysis : useDefSubscribers)
      solver->enqueue({user, analysis});
}

//===----------------------------------------------------------------------===//
// AbstractSparseForwardDataFlowAnalysis
//===----------------------------------------------------------------------===//

AbstractSparseForwardDataFlowAnalysis::AbstractSparseForwardDataFlowAnalysis(
    DataFlowSolver &solver)
    : DataFlowAnalysis(solver) {
  registerPointKind<CFGEdge>();
}

LogicalResult
AbstractSparseForwardDataFlowAnalysis::initialize(Operation *top) {
  // Mark the entry block arguments as having reached their pessimistic
  // fixpoints.
  for (Region &region : top->getRegions()) {
    if (region.empty())
      continue;
    for (Value argument : region.front().getArguments())
      setToEntryState(getLatticeElement(argument));
  }

  return initializeRecursively(top);
}

LogicalResult
AbstractSparseForwardDataFlowAnalysis::initializeRecursively(Operation *op) {
  // Initialize the analysis by visiting every owner of an SSA value (all
  // operations and blocks).
  visitOperation(op);
  for (Region &region : op->getRegions()) {
    for (Block &block : region) {
      getOrCreate<Executable>(&block)->blockContentSubscribe(this);
      visitBlock(&block);
      for (Operation &op : block)
        if (failed(initializeRecursively(&op)))
          return failure();
    }
  }

  return success();
}

LogicalResult AbstractSparseForwardDataFlowAnalysis::visit(ProgramPoint point) {
  if (Operation *op = llvm::dyn_cast_if_present<Operation *>(point))
    visitOperation(op);
  else if (Block *block = llvm::dyn_cast_if_present<Block *>(point))
    visitBlock(block);
  else
    return failure();
  return success();
}

void AbstractSparseForwardDataFlowAnalysis::visitOperation(Operation *op) {
  // Exit early on operations with no results.
  if (op->getNumResults() == 0)
    return;

  // If the containing block is not executable, bail out.
  if (!getOrCreate<Executable>(op->getBlock())->isLive())
    return;

  // Get the result lattices.
  SmallVector<AbstractSparseLattice *> resultLattices;
  resultLattices.reserve(op->getNumResults());
  for (Value result : op->getResults()) {
    AbstractSparseLattice *resultLattice = getLatticeElement(result);
    resultLattices.push_back(resultLattice);
  }

  // The results of a region branch operation are determined by control-flow.
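  // Their lattices are joined from the values forwarded by whichever regions
  // (or the op itself) may transfer control back to the parent op.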
  if (auto branch = dyn_cast<RegionBranchOpInterface>(op)) {
    return visitRegionSuccessors({branch}, branch,
                                 /*successorIndex=*/std::nullopt,
                                 resultLattices);
  }

  // The results of a call operation are determined by the callgraph.
  if (auto call = dyn_cast<CallOpInterface>(op)) {
    const auto *predecessors = getOrCreateFor<PredecessorState>(op, call);
    // If not all return sites are known, then conservatively assume we can't
    // reason about the data-flow.
    if (!predecessors->allPredecessorsKnown())
      return setAllToEntryStates(resultLattices);
    for (Operation *predecessor : predecessors->getKnownPredecessors())
      for (auto it : llvm::zip(predecessor->getOperands(), resultLattices))
        join(std::get<1>(it), *getLatticeElementFor(op, std::get<0>(it)));
    return;
  }

  // Grab the lattice elements of the operands.
  SmallVector<const AbstractSparseLattice *> operandLattices;
  operandLattices.reserve(op->getNumOperands());
  for (Value operand : op->getOperands()) {
    AbstractSparseLattice *operandLattice = getLatticeElement(operand);
    operandLattice->useDefSubscribe(this);
    operandLattices.push_back(operandLattice);
  }

  // Invoke the operation transfer function.
  visitOperationImpl(op, operandLattices, resultLattices);
}

void AbstractSparseForwardDataFlowAnalysis::visitBlock(Block *block) {
  // Exit early on blocks with no arguments.
  if (block->getNumArguments() == 0)
    return;

  // If the block is not executable, bail out.
  if (!getOrCreate<Executable>(block)->isLive())
    return;

  // Get the argument lattices.
  SmallVector<AbstractSparseLattice *> argLattices;
  argLattices.reserve(block->getNumArguments());
  for (BlockArgument argument : block->getArguments()) {
    AbstractSparseLattice *argLattice = getLatticeElement(argument);
    argLattices.push_back(argLattice);
  }

  // The argument lattices of entry blocks are set by region control-flow or
  // the callgraph.
  if (block->isEntryBlock()) {
    // Check if this block is the entry block of a callable region.
    auto callable = dyn_cast<CallableOpInterface>(block->getParentOp());
    if (callable && callable.getCallableRegion() == block->getParent()) {
      const auto *callsites = getOrCreateFor<PredecessorState>(block, callable);
      // If not all callsites are known, conservatively mark all lattices as
      // having reached their pessimistic fixpoints.
      if (!callsites->allPredecessorsKnown())
        return setAllToEntryStates(argLattices);
      for (Operation *callsite : callsites->getKnownPredecessors()) {
        auto call = cast<CallOpInterface>(callsite);
        for (auto it : llvm::zip(call.getArgOperands(), argLattices))
          join(std::get<1>(it), *getLatticeElementFor(block, std::get<0>(it)));
      }
      return;
    }

    // Check if the lattices can be determined from region control flow.
    if (auto branch = dyn_cast<RegionBranchOpInterface>(block->getParentOp())) {
      return visitRegionSuccessors(
          block, branch, block->getParent()->getRegionNumber(), argLattices);
    }

    // Otherwise, we can't reason about the data-flow.
    return visitNonControlFlowArgumentsImpl(block->getParentOp(),
                                            RegionSuccessor(block->getParent()),
                                            argLattices, /*firstIndex=*/0);
  }

  // Iterate over the predecessors of the non-entry block.
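  // For each live incoming CFG edge, join the lattices of the operands that
  // the predecessor's terminator forwards to this block's arguments.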
  for (Block::pred_iterator it = block->pred_begin(), e = block->pred_end();
       it != e; ++it) {
    Block *predecessor = *it;

    // If the edge from the predecessor block to the current block is not live,
    // bail out.
    auto *edgeExecutable =
        getOrCreate<Executable>(getProgramPoint<CFGEdge>(predecessor, block));
    edgeExecutable->blockContentSubscribe(this);
    if (!edgeExecutable->isLive())
      continue;

    // Check if we can reason about the data-flow from the predecessor.
    if (auto branch =
            dyn_cast<BranchOpInterface>(predecessor->getTerminator())) {
      SuccessorOperands operands =
          branch.getSuccessorOperands(it.getSuccessorIndex());
      for (auto [idx, lattice] : llvm::enumerate(argLattices)) {
        if (Value operand = operands[idx]) {
          join(lattice, *getLatticeElementFor(block, operand));
        } else {
          // Conservatively consider internally produced arguments as entry
          // points.
          setAllToEntryStates(lattice);
        }
      }
    } else {
      return setAllToEntryStates(argLattices);
    }
  }
}

void AbstractSparseForwardDataFlowAnalysis::visitRegionSuccessors(
    ProgramPoint point, RegionBranchOpInterface branch,
    std::optional<unsigned> successorIndex,
    ArrayRef<AbstractSparseLattice *> lattices) {
  const auto *predecessors = getOrCreateFor<PredecessorState>(point, point);
  assert(predecessors->allPredecessorsKnown() &&
         "unexpected unresolved region successors");

  for (Operation *op : predecessors->getKnownPredecessors()) {
    // Get the incoming successor operands.
    std::optional<OperandRange> operands;

    // Check if the predecessor is the parent op.
    if (op == branch) {
      operands = branch.getSuccessorEntryOperands(successorIndex);
      // Otherwise, try to deduce the operands from a region return-like op.
    } else {
      if (isRegionReturnLike(op))
        operands = getRegionBranchSuccessorOperands(op, successorIndex);
    }

    if (!operands) {
      // We can't reason about the data-flow.
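      // Conservatively mark all lattices as having reached their pessimistic
      // fixpoints.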
      return setAllToEntryStates(lattices);
    }

    ValueRange inputs = predecessors->getSuccessorInputs(op);
    assert(inputs.size() == operands->size() &&
           "expected the same number of successor inputs as operands");

    unsigned firstIndex = 0;
    if (inputs.size() != lattices.size()) {
      if (llvm::dyn_cast_if_present<Operation *>(point)) {
        if (!inputs.empty())
          firstIndex = cast<OpResult>(inputs.front()).getResultNumber();
        visitNonControlFlowArgumentsImpl(
            branch,
            RegionSuccessor(
                branch->getResults().slice(firstIndex, inputs.size())),
            lattices, firstIndex);
      } else {
        if (!inputs.empty())
          firstIndex = cast<BlockArgument>(inputs.front()).getArgNumber();
        Region *region = point.get<Block *>()->getParent();
        visitNonControlFlowArgumentsImpl(
            branch,
            RegionSuccessor(region, region->getArguments().slice(
                                        firstIndex, inputs.size())),
            lattices, firstIndex);
      }
    }

    for (auto it : llvm::zip(*operands, lattices.drop_front(firstIndex)))
      join(std::get<1>(it), *getLatticeElementFor(point, std::get<0>(it)));
  }
}

const AbstractSparseLattice *
AbstractSparseForwardDataFlowAnalysis::getLatticeElementFor(ProgramPoint point,
                                                            Value value) {
  AbstractSparseLattice *state = getLatticeElement(value);
  addDependency(state, point);
  return state;
}

void AbstractSparseForwardDataFlowAnalysis::setAllToEntryStates(
    ArrayRef<AbstractSparseLattice *> lattices) {
  for (AbstractSparseLattice *lattice : lattices)
    setToEntryState(lattice);
}

void AbstractSparseForwardDataFlowAnalysis::join(
    AbstractSparseLattice *lhs, const AbstractSparseLattice &rhs) {
  propagateIfChanged(lhs, lhs->join(rhs));
}

//===----------------------------------------------------------------------===//
// AbstractSparseBackwardDataFlowAnalysis
//===----------------------------------------------------------------------===//

AbstractSparseBackwardDataFlowAnalysis::AbstractSparseBackwardDataFlowAnalysis(
    DataFlowSolver &solver, SymbolTableCollection &symbolTable)
    : DataFlowAnalysis(solver), symbolTable(symbolTable) {
  registerPointKind<CFGEdge>();
}

LogicalResult
AbstractSparseBackwardDataFlowAnalysis::initialize(Operation *top) {
  return initializeRecursively(top);
}

LogicalResult
AbstractSparseBackwardDataFlowAnalysis::initializeRecursively(Operation *op) {
  visitOperation(op);
  for (Region &region : op->getRegions()) {
    for (Block &block : region) {
      getOrCreate<Executable>(&block)->blockContentSubscribe(this);
      // Initialize ops in reverse order, so we can do as much initial
      // propagation as possible without having to go through the
      // solver queue.
      for (auto it = block.rbegin(); it != block.rend(); it++)
        if (failed(initializeRecursively(&*it)))
          return failure();
    }
  }
  return success();
}

LogicalResult
AbstractSparseBackwardDataFlowAnalysis::visit(ProgramPoint point) {
  if (Operation *op = llvm::dyn_cast_if_present<Operation *>(point))
    visitOperation(op);
  else if (llvm::dyn_cast_if_present<Block *>(point))
    // For backward dataflow, we don't have to do any work for the blocks
    // themselves. CFG edges between blocks are processed by the BranchOp
    // logic in `visitOperation`, and entry blocks for functions are tied
    // to the CallOp arguments by visitOperation.
    return success();
  else
    return failure();
  return success();
}

SmallVector<AbstractSparseLattice *>
AbstractSparseBackwardDataFlowAnalysis::getLatticeElements(ValueRange values) {
  SmallVector<AbstractSparseLattice *> resultLattices;
  resultLattices.reserve(values.size());
  for (Value result : values) {
    AbstractSparseLattice *resultLattice = getLatticeElement(result);
    resultLattices.push_back(resultLattice);
  }
  return resultLattices;
}

SmallVector<const AbstractSparseLattice *>
AbstractSparseBackwardDataFlowAnalysis::getLatticeElementsFor(
    ProgramPoint point, ValueRange values) {
  SmallVector<const AbstractSparseLattice *> resultLattices;
  resultLattices.reserve(values.size());
  for (Value result : values) {
    const AbstractSparseLattice *resultLattice =
        getLatticeElementFor(point, result);
    resultLattices.push_back(resultLattice);
  }
  return resultLattices;
}

static MutableArrayRef<OpOperand> operandsToOpOperands(OperandRange &operands) {
  return MutableArrayRef<OpOperand>(operands.getBase(), operands.size());
}

void AbstractSparseBackwardDataFlowAnalysis::visitOperation(Operation *op) {
  // If we're in a dead block, bail out.
  if (!getOrCreate<Executable>(op->getBlock())->isLive())
    return;

  SmallVector<AbstractSparseLattice *> operandLattices =
      getLatticeElements(op->getOperands());
  SmallVector<const AbstractSparseLattice *> resultLattices =
      getLatticeElementsFor(op, op->getResults());

  // Block arguments of region branch operations flow back into the operands
  // of the parent op.
  if (auto branch = dyn_cast<RegionBranchOpInterface>(op)) {
    visitRegionSuccessors(branch, operandLattices);
    return;
  }

  if (auto branch = dyn_cast<BranchOpInterface>(op)) {
    // Block arguments of successor blocks flow back into our operands.

    // We remember all operands not forwarded to any block in a BitVector.
    // We can't just cut out a range here, since the non-forwarded ops might
    // be non-contiguous (if there's more than one successor).
    BitVector unaccounted(op->getNumOperands(), true);

    for (auto [index, block] : llvm::enumerate(op->getSuccessors())) {
      SuccessorOperands successorOperands = branch.getSuccessorOperands(index);
      OperandRange forwarded = successorOperands.getForwardedOperands();
      if (!forwarded.empty()) {
        MutableArrayRef<OpOperand> operands = op->getOpOperands().slice(
            forwarded.getBeginOperandIndex(), forwarded.size());
        for (OpOperand &operand : operands) {
          unaccounted.reset(operand.getOperandNumber());
          if (std::optional<BlockArgument> blockArg =
                  detail::getBranchSuccessorArgument(
                      successorOperands, operand.getOperandNumber(), block)) {
            meet(getLatticeElement(operand.get()),
                 *getLatticeElementFor(op, *blockArg));
          }
        }
      }
    }
    // Operands not forwarded to successor blocks are typically parameters
    // of the branch operation itself (for example the boolean for if/else).
    for (int index : unaccounted.set_bits()) {
      OpOperand &operand = op->getOpOperand(index);
      visitBranchOperand(operand);
    }
    return;
  }

  // For function calls, connect the arguments of the entry blocks
  // to the operands of the call op.
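  // Each call operand meets the lattice of the corresponding entry block
  // argument of the resolved callee.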
  if (auto call = dyn_cast<CallOpInterface>(op)) {
    Operation *callableOp = call.resolveCallable(&symbolTable);
    if (auto callable = dyn_cast_or_null<CallableOpInterface>(callableOp)) {
      Region *region = callable.getCallableRegion();
      if (region && !region->empty()) {
        Block &block = region->front();
        for (auto [blockArg, operand] :
             llvm::zip(block.getArguments(), operandLattices)) {
          meet(operand, *getLatticeElementFor(op, blockArg));
        }
      }
      return;
    }
  }

  // When the region of an op implementing `RegionBranchOpInterface` has a
  // terminator implementing `RegionBranchTerminatorOpInterface` or a
  // return-like terminator, the region's successors' arguments flow back into
  // the "successor operands" of this terminator.
  //
  // A successor operand with respect to an op implementing
  // `RegionBranchOpInterface` is an operand that is forwarded to a region
  // successor's input. There are two types of successor operands: the operands
  // of this op itself and the operands of the terminators of the regions of
  // this op.
  if (isa<RegionBranchTerminatorOpInterface>(op) ||
      op->hasTrait<OpTrait::ReturnLike>()) {
    if (auto branch = dyn_cast<RegionBranchOpInterface>(op->getParentOp())) {
      visitRegionSuccessorsFromTerminator(op, branch);
      return;
    }
  }

  if (op->hasTrait<OpTrait::ReturnLike>()) {
    // Going backwards, the operands of the return are derived from the
    // results of all CallOps calling this CallableOp.
    if (auto callable = dyn_cast<CallableOpInterface>(op->getParentOp())) {
      const PredecessorState *callsites =
          getOrCreateFor<PredecessorState>(op, callable);
      if (callsites->allPredecessorsKnown()) {
        for (Operation *call : callsites->getKnownPredecessors()) {
          SmallVector<const AbstractSparseLattice *> callResultLattices =
              getLatticeElementsFor(op, call->getResults());
          for (auto [op, result] :
               llvm::zip(operandLattices, callResultLattices))
            meet(op, *result);
        }
      } else {
        // If we don't know all the callers, we can't know where the
        // returned values go. Note that, in particular, this will trigger
        // for the return ops of any public functions.
        setAllToExitStates(operandLattices);
      }
      return;
    }
  }

  visitOperationImpl(op, operandLattices, resultLattices);
}

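// Propagate the lattices of each region successor's inputs back into the
// forwarded operands of the region branch op itself; operands that are not
// forwarded to any successor are handed to `visitBranchOperand`.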
void AbstractSparseBackwardDataFlowAnalysis::visitRegionSuccessors(
    RegionBranchOpInterface branch,
    ArrayRef<AbstractSparseLattice *> operandLattices) {
  Operation *op = branch.getOperation();
  SmallVector<RegionSuccessor> successors;
  SmallVector<Attribute> operands(op->getNumOperands(), nullptr);
  branch.getSuccessorRegions(/*index=*/{}, operands, successors);

  // All operands not forwarded to any successor. This set can be
  // non-contiguous in the presence of multiple successors.
  BitVector unaccounted(op->getNumOperands(), true);

  for (RegionSuccessor &successor : successors) {
    Region *region = successor.getSuccessor();
    OperandRange operands =
        region ? branch.getSuccessorEntryOperands(region->getRegionNumber())
               : branch.getSuccessorEntryOperands({});
    MutableArrayRef<OpOperand> opoperands = operandsToOpOperands(operands);
    ValueRange inputs = successor.getSuccessorInputs();
    for (auto [operand, input] : llvm::zip(opoperands, inputs)) {
      meet(getLatticeElement(operand.get()), *getLatticeElementFor(op, input));
      unaccounted.reset(operand.getOperandNumber());
    }
  }
  // All operands not forwarded to regions are typically parameters of the
  // branch operation itself (for example the boolean for if/else).
  for (int index : unaccounted.set_bits()) {
    visitBranchOperand(op->getOpOperand(index));
  }
}

void AbstractSparseBackwardDataFlowAnalysis::
    visitRegionSuccessorsFromTerminator(Operation *terminator,
                                        RegionBranchOpInterface branch) {
  assert((isa<RegionBranchTerminatorOpInterface>(terminator) ||
          terminator->hasTrait<OpTrait::ReturnLike>()) &&
         "expected a `RegionBranchTerminatorOpInterface` op or a "
         "return-like op");
  assert(terminator->getParentOp() == branch.getOperation() &&
         "expected `branch` to be the parent op of `terminator`");

  SmallVector<Attribute> operandAttributes(terminator->getNumOperands(),
                                           nullptr);
  SmallVector<RegionSuccessor> successors;
  branch.getSuccessorRegions(terminator->getParentRegion()->getRegionNumber(),
                             operandAttributes, successors);
  // All operands not forwarded to any successor. This set can be
  // non-contiguous in the presence of multiple successors.
  BitVector unaccounted(terminator->getNumOperands(), true);

  for (const RegionSuccessor &successor : successors) {
    ValueRange inputs = successor.getSuccessorInputs();
    Region *region = successor.getSuccessor();
    OperandRange operands =
        region ? *getRegionBranchSuccessorOperands(terminator,
                                                   region->getRegionNumber())
               : *getRegionBranchSuccessorOperands(terminator, {});
    MutableArrayRef<OpOperand> opOperands = operandsToOpOperands(operands);
    for (auto [opOperand, input] : llvm::zip(opOperands, inputs)) {
      meet(getLatticeElement(opOperand.get()),
           *getLatticeElementFor(terminator, input));
      unaccounted.reset(const_cast<OpOperand &>(opOperand).getOperandNumber());
    }
  }
  // Visit operands of the branch op not forwarded to the next region.
  // (Like e.g. the boolean of `scf.condition`)
  for (int index : unaccounted.set_bits()) {
    visitBranchOperand(terminator->getOpOperand(index));
  }
}

const AbstractSparseLattice *
AbstractSparseBackwardDataFlowAnalysis::getLatticeElementFor(ProgramPoint point,
                                                             Value value) {
  AbstractSparseLattice *state = getLatticeElement(value);
  addDependency(state, point);
  return state;
}

void AbstractSparseBackwardDataFlowAnalysis::setAllToExitStates(
    ArrayRef<AbstractSparseLattice *> lattices) {
  for (AbstractSparseLattice *lattice : lattices)
    setToExitState(lattice);
}

void AbstractSparseBackwardDataFlowAnalysis::meet(
    AbstractSparseLattice *lhs, const AbstractSparseLattice &rhs) {
  propagateIfChanged(lhs, lhs->meet(rhs));
}