//===- Fusion.cpp - Implementation of linalg Fusion -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Fusion pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Dominance.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

#include <set>

#define DEBUG_TYPE "linalg-fusion"

using namespace mlir;
using namespace mlir::linalg;

/// Implements a simple high-level fusion pass on linalg structured operations.
///
/// In each block, linalg ops are processed in reverse textual order.
/// Given a linalg op `O`, fusion occurs by:
///   1. inspecting the linalg ops that write into the views read by `O`. There
///      are 2 cases:
///      a) buffer case: use the SSA value of the views and a simple alias
///         analysis on subview ops to determine producer-consumer dependences;
///      b) tensor case: use SSA use-def chains on extract_slice ops;
///   2. greedily fusing the linalg ops that produce the subview/extract_slice;
///   3. inspecting the fused ops and determining whether they have any other
///      remaining LinalgOp uses; if not, erasing the original producing linalg
///      op.
///
/// More advanced use cases, analyses as well as profitability heuristics are
/// left for future work.

struct ShapeDimension {
  Value shape;
  unsigned dimension;
};

// Given an `op`, returns the first (`shape`, `dimension`) pair that identifies
// the loop range at `loopDepth`. The semantics of the loopToOperandRangesMaps
// guarantee that at least one such dimension is found. If multiple candidates
// exist, they must agree by construction (i.e. have the same size) and we just
// return the first one.
static ShapeDimension
getShapeDefiningLoopRange(LinalgOp op, unsigned loopDepth,
                          bool fromSubViewOpOnly = false) {
  // Iterate over the inputs and outputs in order.
  // Extract the subranges from the linearized ranges.
  for (OpOperand *opOperand : op.getInputAndOutputOperands()) {
    // The method `getRangeFromOperandShape` requires using SubViewOp or
    // ExtractSliceOps. If the value isn't defined by one of those ops,
    // continue.
    // TODO: The method should be adapted to get the values from
    // `ViewInterface`. The interface needs a `getOrCreateRanges` method which
    // currently returns a `linalg.range`. The fix here is to move this op to
    // `std` dialect and add the method to `ViewInterface`.
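    // Note that `isa_and_nonnull` below also gracefully handles operands that
    // have no defining op at all (e.g. block arguments), for which
    // `getDefiningOp` returns null.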
    if (fromSubViewOpOnly &&
        !isa_and_nonnull<memref::SubViewOp, tensor::ExtractSliceOp>(
            opOperand->get().getDefiningOp()))
      continue;

    AffineMap map = op.getTiedIndexingMap(opOperand);
    LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange I/O idx: "
                            << opOperand->getOperandNumber() << "\n");
    LLVM_DEBUG(llvm::dbgs()
               << "getShapeDefiningLoopRange map: " << map << "\n");
    SmallVector<Value, 8> shapeRanges(map.getNumResults(), nullptr);
    for (const auto &en : llvm::enumerate(map.getResults())) {
      auto dimExpr = en.value().dyn_cast<AffineDimExpr>();
      if (!dimExpr)
        continue;
      if (loopDepth == en.value().cast<AffineDimExpr>().getPosition()) {
        LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange loopDepth: "
                                << loopDepth << "\n");
        LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange shape: "
                                << opOperand->get() << "\n");
        return ShapeDimension{opOperand->get(),
                              static_cast<unsigned>(en.index())};
      }
    }
  }
  llvm_unreachable("Expect to be able to extract a shape defining loop range");
}

// Returns the SSA values of all input and output operands of `producer`; these
// are the values that get tiled when the producer is cloned.
static SmallVector<Value> getTiledOperands(LinalgOp producer) {
  return llvm::to_vector(
      llvm::map_range(producer.getInputAndOutputOperands(),
                      [](OpOperand *opOperand) { return opOperand->get(); }));
}

/// Fuses the producer by cloning the `producer`. The `fusedLoopsAndRanges`
/// provides the loop range information for the fused loops. The rest are
/// obtained from the producer itself, since they are not tiled + fused.
static LinalgOp fuse(OpBuilder &b, LinalgOp producer,
                     const DenseMap<unsigned, Range> &fusedLoopsAndRanges) {
  SmallVector<OpFoldResult> ivs, tileSizes, sizeBounds;
  SmallVector<Range> loopRanges;
  Location loc = producer.getLoc();

  for (unsigned i = 0, e = producer.getNumLoops(); i < e; ++i) {
    auto shapeDim = getShapeDefiningLoopRange(producer, i);
    OpFoldResult dim =
        createFoldedDimOp(b, loc, shapeDim.shape, shapeDim.dimension);
    sizeBounds.push_back(dim);
    auto it = fusedLoopsAndRanges.find(i);
    if (it != fusedLoopsAndRanges.end()) {
      ivs.push_back(it->second.offset);
      tileSizes.push_back(it->second.size);
      loopRanges.push_back(it->second);
      LLVM_DEBUG(llvm::dbgs() << "tiled loop#" << i << " with LoopRange "
                              << loopRanges.back() << "\n");
    } else {
      tileSizes.push_back(b.getIndexAttr(0));
      loopRanges.push_back(Range{b.getIndexAttr(0), dim, b.getIndexAttr(1)});
      LLVM_DEBUG(llvm::dbgs() << "full loop#" << i << " with LoopRange "
                              << loopRanges.back() << "\n");
    }
  }

  SmallVector<Value, 8> clonedShapes;
  clonedShapes.reserve(producer.getNumInputsAndOutputs());

  // Compute subranges for all tensor input/output operands.
  clonedShapes.append(makeTiledShapes(
      b, loc, producer, getTiledOperands(producer), ivs, tileSizes, sizeBounds,
      /*omitPartialTileCheck=*/false));

  // Iterate over the results in order.
  // Extract the subtensor type from the linearized range.
  // Since we do not enforce any canonicalizations on the fly, this is always
  // fully dynamic at construction time.
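  // For example (illustrative only): a rank-2 producer result of type
  // `tensor<4x8xf32>` is given the fully dynamic slice type `tensor<?x?xf32>`
  // here; callers reconcile any remaining type mismatch with a `tensor.cast`.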
  SmallVector<Type, 4> resultTypes;
  resultTypes.reserve(producer->getNumResults());
  for (RankedTensorType t : producer.getOutputTensorTypes()) {
    unsigned rank = t.getRank();
    SmallVector<int64_t, 4> staticOffsetsVector(
        rank, ShapedType::kDynamicStrideOrOffset);
    SmallVector<int64_t, 4> staticSizesVector(rank, ShapedType::kDynamicSize);
    SmallVector<int64_t, 4> staticStridesVector(
        rank, ShapedType::kDynamicStrideOrOffset);
    resultTypes.push_back(tensor::ExtractSliceOp::inferResultType(
        t.cast<RankedTensorType>(), staticOffsetsVector, staticSizesVector,
        staticStridesVector));
  }

  Operation *clonedOp = producer.clone(b, loc, resultTypes, clonedShapes);

  // Shift all IndexOp results by the tile offset.
  SmallVector<OpFoldResult> allIvs = llvm::to_vector(
      llvm::map_range(loopRanges, [&](Range range) { return range.offset; }));
  offsetIndices(b, clonedOp, allIvs);

  return clonedOp;
}

/// Get the loop range for a dimension `dim` based on the `shapedOperand`. It
/// is expected to be defined by a subview op or an extract_slice op.
static Range getRangeFromOperandShape(OpBuilder &b, Location loc,
                                      Value shapedOperand, unsigned dim) {
  Operation *shapeProducingOp = shapedOperand.getDefiningOp();
  if (auto subViewOp = dyn_cast<memref::SubViewOp>(shapeProducingOp))
    return subViewOp.getOrCreateRanges(b, loc)[dim];
  if (auto sliceOp = dyn_cast<tensor::ExtractSliceOp>(shapeProducingOp))
    return sliceOp.getOrCreateRanges(b, loc)[dim];
  llvm_unreachable("SubviewOp or ExtractSliceOp expected");
}

/// Fuses the producer into the loop immediately enclosing the consumer.
/// This is achieved by "recomputing" the producer at the time it
/// is needed just before the consumer.
static LinalgOp fuse(OpBuilder &b, LinalgOp producerOp, AffineMap producerMap,
                     OpOperand &consumerOpOperand) {
  LLVM_DEBUG(llvm::dbgs() << "Producer map: " << producerMap << "\n");
  DenseMap<unsigned, Range> fusedLoopsAndRanges;
  Value shapedOperand = consumerOpOperand.get();
  for (const auto &en : llvm::enumerate(producerMap.getResults())) {
    unsigned posInProducerLoop = en.value().cast<AffineDimExpr>().getPosition();
    fusedLoopsAndRanges[posInProducerLoop] = getRangeFromOperandShape(
        b, consumerOpOperand.getOwner()->getLoc(), shapedOperand, en.index());
  }
  return fuse(b, producerOp, fusedLoopsAndRanges);
}

// Encode structural fusion safety preconditions.
// Some of these will be lifted in the future with better analysis.
static bool isStructurallyFusableProducer(LinalgOp producer, Value consumedView,
                                          LinalgOp consumer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  if (producer.getNumOutputs() != 1) {
    LLVM_DEBUG(llvm::dbgs() << "\nNot structurally fusable (multi-output)");
    return false;
  }
  // Only fuse when the producer block dominates.
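  // Fusion recomputes the producer immediately before the consumer, so
  // requiring that the producer's block dominates the consumer's block is a
  // conservative structural guarantee that this recomputation is valid.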
  DominanceInfo dom(producer.getOperation());
  if (!dom.dominates(producer->getBlock(), consumer->getBlock())) {
    LLVM_DEBUG(
        llvm::dbgs()
        << "\nNot structurally fusable (producer block does not dominate)");
    return false;
  }
  return true;
}

bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
                                             LinalgOp consumer,
                                             Value consumedView,
                                             LinalgOp producer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  // Make some simple structural checks that alleviate the need for more
  // complex analyses.
  if (!isStructurallyFusableProducer(producer, consumedView, consumer)) {
    LLVM_DEBUG(llvm::dbgs() << "\n***Not static last write due to structure:\t"
                            << *producer.getOperation());
    return false;
  }
  // Check for any interleaved write to consumedView.
  if (!graph.findCoveringWrites(producer, consumer, consumedView).empty()) {
    LLVM_DEBUG(llvm::dbgs() << "\n***Not fusable due to interleaved write:\t"
                            << *producer.getOperation());
    return false;
  }
  return true;
}

bool mlir::linalg::isFusableInto(const LinalgDependenceGraph &graph,
                                 LinalgOp consumer, Value consumedView,
                                 LinalgOp producer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  if (!isProducerLastWriteOfView(graph, consumer, consumedView, producer))
    return false;
  // Check for any fusion-preventing dependence to any shape read/written that
  // would violate dependences.
  if (!graph.findCoveringDependences(producer, consumer).empty()) {
    LLVM_DEBUG(llvm::dbgs()
               << "\n***Not fusable due to an interleaved dependence:\t"
               << *producer.getOperation());
    return false;
  }
  return true;
}

/// For `consumer` with buffer semantics, find the Linalg operation on buffers
/// that is the last writer of `consumerOpOperand`. For now the fusable
/// dependence is returned as an instance of the `dependenceGraph`.
static FailureOr<LinalgDependenceGraph::LinalgDependenceGraphElem>
findFusableProducer(OpOperand &consumerOpOperand,
                    const LinalgDependenceGraph &dependenceGraph) {
  LLVM_DEBUG(llvm::dbgs() << "findFusableProducer for: "
                          << consumerOpOperand.get() << " @"
                          << consumerOpOperand.getOperandNumber() << " in "
                          << *consumerOpOperand.getOwner() << "\n");
  LinalgOp consumerOp = dyn_cast<LinalgOp>(consumerOpOperand.getOwner());
  if (!consumerOp)
    return failure();

  // Only consider RAW and WAW atm.
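  // RAW: the dependent op writes a view that the consumer reads; WAW: both ops
  // write into the same view. RAR and WAR dependences do not identify a writer
  // of the consumed view and are therefore not inspected here.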
  for (auto depType : {
           LinalgDependenceGraph::DependenceType::RAW,
           LinalgDependenceGraph::DependenceType::WAW,
       }) {
    LLVM_DEBUG(llvm::dbgs()
               << "Dependencies into: " << *consumerOp.getOperation() << "\n");
    for (auto dependence : llvm::make_filter_range(
             dependenceGraph.getDependencesInto(consumerOp, depType),
             [&](LinalgDependenceGraph::LinalgDependenceGraphElem elem) {
               LLVM_DEBUG(llvm::dbgs() << "Inspect dependence btw: "
                                       << elem.getIndexingValue() << " and "
                                       << elem.getDependentValue() << "\n");
               Value v = elem.getIndexingValue();
               Optional<unsigned> operandNum =
                   elem.getIndexingOpViewOperandNum();
               return isa<LinalgOp>(elem.getDependentOp()) &&
                      v == consumerOpOperand.get() && operandNum &&
                      *operandNum == consumerOpOperand.getOperandNumber();
             })) {
      // Consumer consumes this view, `isStructurallyFusableProducer` also
      // checks whether it is a strict subview of the producer view.
      auto producer = cast<LinalgOp>(dependence.getDependentOp());
      LLVM_DEBUG(llvm::dbgs()
                 << "\n"
                 << LinalgDependenceGraph::getDependenceTypeStr(depType)
                 << "producer: " << *dependence.getDependentOp()
                 << " view: " << dependence.getDependentValue() << "\n");

      // If the producer and consumer have tensor semantics, the only
      // dependence between them is a RAW dependence and they are fusable by
      // construction. For buffer semantics, additional checks are needed.
      if (producer.hasBufferSemantics() && consumerOp.hasBufferSemantics() &&
          isFusableInto(dependenceGraph, consumerOp, consumerOpOperand.get(),
                        producer))
        return dependence;
      if (producer.hasTensorSemantics() && consumerOp.hasTensorSemantics()) {
        assert(dependence.dependenceType ==
               LinalgDependenceGraph::DependenceType::RAW);
        return dependence;
      }
    }
  }
  return failure();
}

FailureOr<FusionInfo>
mlir::linalg::fuseProducerOfBuffer(OpBuilder &b, OpOperand &consumerOpOperand,
                                   const LinalgDependenceGraph &graph) {
  Optional<LinalgDependenceGraph::LinalgDependenceGraphElem> fusableDependence =
      findFusableProducer(consumerOpOperand, graph);
  if (!fusableDependence)
    return failure();

  LinalgOp producerOp = dyn_cast<LinalgOp>(fusableDependence->getDependentOp());
  if (!producerOp)
    return failure();

  // If producer is already in the same block as consumer, we are done.
  if (consumerOpOperand.get().getParentBlock() ==
      fusableDependence->getDependentValue().getParentBlock())
    return failure();

  Optional<AffineMap> producerMap =
      fusableDependence->getDependentOpViewIndexingMap();
  if (!producerMap)
    return failure();

  // Must be a subview or an extract_slice to guarantee there are loops we can
  // fuse into.
  auto subView = consumerOpOperand.get().getDefiningOp<memref::SubViewOp>();
  if (!subView) {
    LLVM_DEBUG(llvm::dbgs() << "\nNot fusable (not a subview)");
    return failure();
  }

  // Fuse `producer` just before `consumer`.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(consumerOpOperand.getOwner());
  LLVM_DEBUG(llvm::dbgs() << "Fuse into consumer: "
                          << *consumerOpOperand.getOwner() << "\n");

  auto fusedProducer = fuse(b, producerOp, *producerMap, consumerOpOperand);
  return FusionInfo{producerOp, fusedProducer};
}

/// Walk back use-def chain through scf::For yields.
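/// The walk looks through `tensor.extract_slice` sources and the init operands
/// of `scf.for` iter_args until it reaches a value produced by a LinalgOp.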
/// Sets `opResult` if it finds a producer LinalgOp.
// TODO(ravishankarm, ntv): This can be moved into the dependence graph's
// dependence tracking since the dependence tracking is similar to what is done
// w.r.t. buffers.
static void getProducerOfTensor(Value tensor, OpResult &opResult) {
  if (!tensor.getType().isa<RankedTensorType>())
    return;

  while (true) {
    LLVM_DEBUG(llvm::dbgs() << "\ngetProducerOfTensor: " << tensor);
    if (auto linalgOp = tensor.getDefiningOp<LinalgOp>()) {
      opResult = tensor.cast<OpResult>();
      return;
    }
    if (auto sliceOp = tensor.getDefiningOp<tensor::ExtractSliceOp>()) {
      tensor = sliceOp.getSource();
      continue;
    }
    if (auto blockArg = tensor.dyn_cast<BlockArgument>()) {
      // Block arguments have no defining op. If `tensor` is an iter_arg of an
      // enclosing scf.for, continue the walk from the corresponding init
      // operand (body argument 0 is the loop induction variable, so iter_args
      // start at argument 1).
      if (auto forOp =
              dyn_cast<scf::ForOp>(blockArg.getOwner()->getParentOp())) {
        tensor =
            *(forOp.getIterOperands().begin() + blockArg.getArgNumber() - 1);
        continue;
      }
    }
    return;
  }
}

FailureOr<FusionInfo>
mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpOperand &consumerOpOperand) {
  Value inputTensor = consumerOpOperand.get();
  OpResult producerOpResult;
  getProducerOfTensor(inputTensor, producerOpResult);
  if (!producerOpResult) {
    LLVM_DEBUG(llvm::dbgs() << "\nUnable to find producer");
    return failure();
  }
  return fuseProducerOfTensor(b, producerOpResult, consumerOpOperand);
}

FailureOr<FusionInfo>
mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult,
                                   OpOperand &consumerOpOperand) {
  auto producerOp = dyn_cast<LinalgOp>(producerOpResult.getOwner());
  if (!producerOp)
    return failure();

  LinalgOp consumerOp = dyn_cast<LinalgOp>(consumerOpOperand.getOwner());
  if (!consumerOp)
    return failure();

  Value inputTensor = consumerOpOperand.get();

  // Must be an extract_slice op to guarantee there are loops we can fuse into.
  auto sliceOp = inputTensor.getDefiningOp<tensor::ExtractSliceOp>();
  if (!sliceOp) {
    LLVM_DEBUG(llvm::dbgs()
               << "\nNot fusable, not an extract_slice op: " << inputTensor);
    return failure();
  }

  // If producer is already in the same block as consumer, we are done.
  if (consumerOpOperand.get().getParentBlock() ==
      producerOpResult.getParentBlock())
    return failure();

  // Insert fused `producer` just before `consumer`.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(consumerOp);
  LLVM_DEBUG(llvm::dbgs() << "Fuse into consumer: " << *consumerOp << "\n");
  OpOperand *opOperand =
      producerOp.getOutputOperand(producerOpResult.getResultNumber());
  LinalgOp fusedProducer =
      fuse(b, producerOp, producerOp.getTiedIndexingMap(opOperand),
           consumerOpOperand);

  // Replace use.
  // Canonicalizations are not guaranteed to have happened before constructing
  // `fusedProducer`. In the tensor case this can result in temporary type
  // mismatches. Insert a `tensor.cast` op to propagate the transformation
  // invariant that types are compatible.
  Value def = fusedProducer->getResult(producerOpResult.getResultNumber());
  Type consumerType = consumerOpOperand.get().getType();
  if (consumerType != def.getType())
    def = b.create<tensor::CastOp>(fusedProducer.getLoc(), consumerType, def);
  consumerOpOperand.set(def);
  return FusionInfo{cast<LinalgOp>(producerOpResult.getOwner()), fusedProducer};
}