1 //===- Fusion.cpp - Implementation of linalg Fusion -----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the linalg dialect Fusion pass. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "PassDetail.h" 14 #include "mlir/Dialect/Affine/IR/AffineOps.h" 15 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h" 16 #include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h" 17 #include "mlir/Dialect/Linalg/IR/Linalg.h" 18 #include "mlir/Dialect/Linalg/Passes.h" 19 #include "mlir/Dialect/Linalg/Transforms/Transforms.h" 20 #include "mlir/Dialect/Linalg/Utils/Utils.h" 21 #include "mlir/Dialect/MemRef/IR/MemRef.h" 22 #include "mlir/Dialect/Tensor/IR/Tensor.h" 23 #include "mlir/IR/AffineExpr.h" 24 #include "mlir/IR/AffineMap.h" 25 #include "mlir/IR/Dominance.h" 26 #include "mlir/Support/LLVM.h" 27 #include "mlir/Transforms/GreedyPatternRewriteDriver.h" 28 #include "mlir/Transforms/RegionUtils.h" 29 #include "llvm/ADT/MapVector.h" 30 #include "llvm/ADT/ScopeExit.h" 31 #include "llvm/Support/CommandLine.h" 32 #include "llvm/Support/Debug.h" 33 34 #include <set> 35 36 #define DEBUG_TYPE "linalg-fusion" 37 38 using namespace mlir; 39 using namespace mlir::linalg; 40 41 /// Implements a simple high-level fusion pass on linalg structured operations. 42 /// 43 /// In each block, linalg ops are processed in reverse textual order. 44 /// Given a linalg op `O`, fusion occurs by: 45 /// 1. inspecting the linalg ops that write into the views read by `O`. 
/// There are 2 cases:
///      a) buffer case: use the SSA value of the views and a simple alias
///         analysis on subview ops to determine producer-consumer dependences;
///      b) tensor case: use SSA use-def chains on extract_slice ops;
///   2. greedily fuse the linalg ops that produce the subview/extract_slice.
///   3. inspect the fused ops and determine whether they have other remaining
///      LinalgOp uses. If not, then erase the original producing linalg op.
///
/// More advanced use cases, analyses as well as profitability heuristics are
/// left for future work.

/// A (shape, dimension) pair: `shape` is an SSA value of shaped type and
/// `dimension` selects one of its dimensions.
struct ShapeDimension {
  Value shape;
  unsigned dimension;
};

// Given an `op`, returns the first (`shape`, `dimension`) pair that identifies
// the loop range at `loopDepth`. The semantics of the loopToOperandRangesMaps
// guarantees at least one such dimension is found. If multiple candidates exist
// they must agree by construction (i.e. have the same size) and we just return
// the first one.
//
// When `fromSubViewOpOnly` is true, only operands produced by a
// memref.subview or tensor.extract_slice are considered, because the caller
// (`fuseOperations`) subsequently feeds the result to
// `getRangeFromOperandShape`, which requires one of those two producers.
static ShapeDimension
getShapeDefiningLoopRange(LinalgOp op, unsigned loopDepth,
                          bool fromSubViewOpOnly = false) {
  // Iterate over the inputs and outputs in order.
  // Extract the subranges from the linearized ranges.
  for (OpOperand *opOperand : op.getInputAndOutputOperands()) {
    // The method `getRangeFromOperandShape` requires using SubViewOp or
    // ExtractSliceOps. If the value isn't defined from there continue.
    // TODO: The method should be adapted to get the values from
    // `ViewInterface`. The interface needs a `getOrCreateRanges` method which
    // currently returns a `linalg.range`. The fix here is to move this op to
    // `std` dialect and add the method to `ViewInterface`.
    if (fromSubViewOpOnly &&
        !isa_and_nonnull<memref::SubViewOp, tensor::ExtractSliceOp>(
            opOperand->get().getDefiningOp()))
      continue;

    AffineMap map = op.getTiedIndexingMap(opOperand);
    LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange I/O idx: "
                            << opOperand->getOperandNumber() << "\n");
    LLVM_DEBUG(llvm::dbgs()
               << "getShapeDefiningLoopRange map: " << map << "\n");
    // NOTE(review): `shapeRanges` is initialized but never read below; it
    // looks like leftover state from an earlier version of this routine.
    SmallVector<Value, 8> shapeRanges(map.getNumResults(), nullptr);
    for (const auto &en : llvm::enumerate(map.getResults())) {
      auto dimExpr = en.value().dyn_cast<AffineDimExpr>();
      if (!dimExpr)
        continue;
      // The first operand dimension whose indexing expression is exactly the
      // loop `loopDepth` defines that loop's range.
      if (loopDepth == en.value().cast<AffineDimExpr>().getPosition()) {
        LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange loopDepth: "
                                << loopDepth << "\n");
        LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange shape: "
                                << opOperand->get() << "\n");
        return ShapeDimension{opOperand->get(),
                              static_cast<unsigned>(en.index())};
      }
    }
  }
  llvm_unreachable("Expect to be able to extract a shape defining loop range");
}

/// Returns the operands of `producer` that get tiled when the producer is
/// fused; currently all input and output operands.
static SmallVector<Value> getTiledOperands(LinalgOp producer) {
  return producer.getInputAndOutputOperands();
}

/// Fuses the producer by cloning the `producer`. The `fusedLoopsAndRanges`
/// provides the loop range information for the fused loops. The rest are
/// obtained from the producer itself, since they are not tiled + fused.
/// Clones `producer` with operands tiled according to `fusedLoopsAndRanges`.
/// Loops present in `fusedLoopsAndRanges` use the given offset/size; all other
/// loops keep their full range (tile size 0 is the "do not tile" convention
/// used by `makeTiledShapes`). Returns the cloned (fused) op; the clone is
/// inserted at the builder's current insertion point.
static LinalgOp fuse(OpBuilder &b, LinalgOp producer,
                     const DenseMap<unsigned, Range> &fusedLoopsAndRanges) {
  SmallVector<Value, 8> ivs, tileSizes, sizeBounds;
  SmallVector<Range, 8> loopRanges;
  Location loc = producer.getLoc();
  auto zero = b.create<arith::ConstantIndexOp>(loc, 0);
  auto one = b.create<arith::ConstantIndexOp>(loc, 1);

  for (unsigned i = 0, e = producer.getNumLoops(); i < e; ++i) {
    // Find a shaped operand dimension that defines loop `i` and materialize
    // its dynamic size as the upper bound of that loop.
    auto shapeDim = getShapeDefiningLoopRange(producer, i);
    Value dim = createOrFoldDimOp(b, loc, shapeDim.shape, shapeDim.dimension);
    sizeBounds.push_back(dim);
    auto it = fusedLoopsAndRanges.find(i);
    if (it != fusedLoopsAndRanges.end()) {
      // Loop `i` is fused: tile it with the provided offset/size.
      ivs.push_back(it->second.offset);
      tileSizes.push_back(it->second.size);
      loopRanges.push_back(it->second);
      LLVM_DEBUG(llvm::dbgs() << "tiled loop#" << i << " with LoopRange "
                              << loopRanges.back() << "\n");
    } else {
      // Loop `i` is not fused: keep the full [0, dim) range, tile size 0
      // meaning "untiled" for makeTiledShapes.
      tileSizes.push_back(zero);
      loopRanges.push_back(Range{zero, dim, one});
      LLVM_DEBUG(llvm::dbgs() << "full loop#" << i << " with LoopRange "
                              << loopRanges.back() << "\n");
    }
  }

  SmallVector<Value, 8> clonedShapes;
  clonedShapes.reserve(producer.getNumInputsAndOutputs());

  // Compute subranges for all tensor input/output operands.
  clonedShapes.append(makeTiledShapes(
      b, loc, producer, getTiledOperands(producer), ivs, tileSizes, sizeBounds,
      /*omitPartialTileCheck=*/false));

  // Iterate over the results in order.
  // Extract the subtensor type from the linearized range.
  // Since we do not enforce any canonicalizations on the fly, this is always
  // fully dynamic at construction time.
  SmallVector<Type, 4> resultTypes;
  resultTypes.reserve(producer->getNumResults());
  for (RankedTensorType t : producer.getOutputTensorTypes()) {
    unsigned rank = t.getRank();
    // All offsets/sizes/strides are dynamic: canonicalization may later
    // refine them, but at construction time nothing is static.
    SmallVector<int64_t, 4> staticOffsetsVector(
        rank, ShapedType::kDynamicStrideOrOffset);
    SmallVector<int64_t, 4> staticSizesVector(rank, ShapedType::kDynamicSize);
    SmallVector<int64_t, 4> staticStridesVector(
        rank, ShapedType::kDynamicStrideOrOffset);
    resultTypes.push_back(tensor::ExtractSliceOp::inferResultType(
        t.cast<RankedTensorType>(), staticOffsetsVector, staticSizesVector,
        staticStridesVector));
  }

  Operation *clonedOp = producer.clone(b, loc, resultTypes, clonedShapes);

  // Shift all IndexOp results by the tile offset.
  SmallVector<Value> allIvs;
  llvm::transform(loopRanges, std::back_inserter(allIvs),
                  [](Range range) { return range.offset; });
  addTileLoopIvsToIndexOpResults(b, clonedOp, allIvs);

  return clonedOp;
}

/// Get the loop range for a dimension `dim` based on the `shapedOperand`. It is
/// expected to be defined by a subview op or an extract_slice op.
static Range getRangeFromOperandShape(OpBuilder &b, Location loc,
                                      Value shapedOperand, unsigned dim) {
  Operation *shapeProducingOp = shapedOperand.getDefiningOp();
  if (auto subViewOp = dyn_cast<memref::SubViewOp>(shapeProducingOp))
    return subViewOp.getOrCreateRanges(b, loc)[dim];
  if (auto sliceOp = dyn_cast<tensor::ExtractSliceOp>(shapeProducingOp))
    return sliceOp.getOrCreateRanges(b, loc)[dim];
  llvm_unreachable("SubviewOp or ExtractSliceOp expected");
}

/// Fuses the producer into the loop immediately enclosing the consumer.
/// This is achieved by "recomputing" the producer at the time it
/// is needed just before the consumer.
/// Fuses `producerOp` just before the owner of `consumerOpOperand` by mapping
/// each result dimension of `producerMap` to the corresponding range of the
/// consumed subview/extract_slice, then delegating to the range-based `fuse`.
static LinalgOp fuse(OpBuilder &b, LinalgOp producerOp, AffineMap producerMap,
                     OpOperand &consumerOpOperand) {
  LLVM_DEBUG(llvm::dbgs() << "Producer map: " << producerMap << "\n");
  DenseMap<unsigned, Range> fusedLoopsAndRanges;
  Value shapedOperand = consumerOpOperand.get();
  for (const auto &en : llvm::enumerate(producerMap.getResults())) {
    // Result `en.index()` of the producer map indexes producer loop
    // `posInProducerLoop`; its fused range is the consumer's slice range for
    // that operand dimension.
    unsigned posInProducerLoop = en.value().cast<AffineDimExpr>().getPosition();
    fusedLoopsAndRanges[posInProducerLoop] = getRangeFromOperandShape(
        b, consumerOpOperand.getOwner()->getLoc(), shapedOperand, en.index());
  }
  return fuse(b, producerOp, fusedLoopsAndRanges);
}

// Encode structural fusion safety preconditions.
// Some of these will be lifted in the future with better analysis.
// NOTE(review): `consumedView` is currently unused here — the structural
// checks only look at result count and dominance.
static bool isStructurallyFusableProducer(LinalgOp producer, Value consumedView,
                                          LinalgOp consumer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  if (producer.getNumOutputs() != 1) {
    LLVM_DEBUG(llvm::dbgs() << "\nNot structurally fusable (multi-output)");
    return false;
  }
  // Only fuse when the producer block dominates.
  DominanceInfo dom(producer.getOperation());
  if (!dom.dominates(producer->getBlock(), consumer->getBlock())) {
    LLVM_DEBUG(
        llvm::dbgs()
        << "\nNot structurally fusable (producer block does not dominate)");
    return false;
  }
  return true;
}

/// Returns true if `producer` is the last op writing `consumedView` before
/// `consumer` reads it, i.e. no interleaved write covers the view.
bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
                                             LinalgOp consumer,
                                             Value consumedView,
                                             LinalgOp producer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  // Make some simple structural checks that alleviate the need for more
  // complex analyses.
  if (!isStructurallyFusableProducer(producer, consumedView, consumer)) {
    LLVM_DEBUG(llvm::dbgs() << "\n***Not static last write due to structure:\t"
                            << *producer.getOperation());
    return false;
  }
  // Check for any interleaved write to consumedView.
  if (!graph.findCoveringWrites(producer, consumer, consumedView).empty()) {
    LLVM_DEBUG(llvm::dbgs() << "\n***Not fusable due to interleaved write:\t"
                            << *producer.getOperation());
    return false;
  }
  return true;
}

/// Returns true if `producer` may legally be fused into `consumer` on
/// `consumedView`: the producer must be the last writer of the view and no
/// covering dependence between the two ops may be violated by the move.
bool mlir::linalg::isFusableInto(const LinalgDependenceGraph &graph,
                                 LinalgOp consumer, Value consumedView,
                                 LinalgOp producer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  if (!isProducerLastWriteOfView(graph, consumer, consumedView, producer))
    return false;
  // Check for any fusion-preventing dependence to any shape read/written that
  // would violate dependences.
  if (!graph.findCoveringDependences(producer, consumer).empty()) {
    LLVM_DEBUG(llvm::dbgs()
               << "\n***Not fusable due to an interleaved dependence:\t"
               << *producer.getOperation());
    return false;
  }
  return true;
}

/// For `consumer` with buffer semantics, find the Linalg operation on buffers
/// that is the last writer of `consumerOpOperand`. For now the fusable
/// dependence is returned as an instance of the `dependenceGraph`.
/// Finds a RAW or WAW dependence in `dependenceGraph` whose indexing side is
/// exactly `consumerOpOperand` and whose dependent op is a LinalgOp; returns
/// that dependence, or failure if none is fusable.
static FailureOr<LinalgDependenceGraph::LinalgDependenceGraphElem>
findFusableProducer(OpOperand &consumerOpOperand,
                    const LinalgDependenceGraph &dependenceGraph) {
  LLVM_DEBUG(llvm::dbgs() << "findFusableProducer for: "
                          << consumerOpOperand.get() << " @"
                          << consumerOpOperand.getOperandNumber() << " in "
                          << *consumerOpOperand.getOwner() << "\n");
  LinalgOp consumerOp = dyn_cast<LinalgOp>(consumerOpOperand.getOwner());
  if (!consumerOp)
    return failure();

  // Only consider RAW and WAW atm.
  for (auto depType : {
           LinalgDependenceGraph::DependenceType::RAW,
           LinalgDependenceGraph::DependenceType::WAW,
       }) {
    LLVM_DEBUG(llvm::dbgs()
               << "Dependencies into: " << *consumerOp.getOperation() << "\n");
    // Keep only dependences whose indexing (consumer-side) value and operand
    // number match `consumerOpOperand` and whose producer is a LinalgOp.
    for (auto dependence : llvm::make_filter_range(
             dependenceGraph.getDependencesInto(consumerOp, depType),
             [&](LinalgDependenceGraph::LinalgDependenceGraphElem elem) {
               LLVM_DEBUG(llvm::dbgs() << "Inspect dependence btw: "
                                       << elem.getIndexingValue() << " and "
                                       << elem.getDependentValue() << "\n");
               Value v = elem.getIndexingValue();
               Optional<unsigned> operandNum =
                   elem.getIndexingOpViewOperandNum();
               return isa<LinalgOp>(elem.getDependentOp()) &&
                      v == consumerOpOperand.get() && operandNum &&
                      *operandNum == consumerOpOperand.getOperandNumber();
             })) {
      // Consumer consumes this view, `isStructurallyFusableProducer` also
      // checks whether it is a strict subview of the producer view.
      auto producer = cast<LinalgOp>(dependence.getDependentOp());
      LLVM_DEBUG(llvm::dbgs()
                 << "\n"
                 << LinalgDependenceGraph::getDependenceTypeStr(depType)
                 << "producer: " << *dependence.getDependentOp()
                 << " view: " << dependence.getDependentValue() << "\n");

      // If the producer and consumer have tensor semantics, the only
      // dependence between them is through a RAW dependence and they are
      // fusable by construction. For buffer semantics need additional checks.
      if (producer.hasBufferSemantics() && consumerOp.hasBufferSemantics() &&
          isFusableInto(dependenceGraph, consumerOp, consumerOpOperand.get(),
                        producer))
        return dependence;
      if (producer.hasTensorSemantics() && consumerOp.hasTensorSemantics()) {
        assert(dependence.dependenceType ==
               LinalgDependenceGraph::DependenceType::RAW);
        return dependence;
      }
    }
  }
  return failure();
}

/// Buffer-semantics fusion entry point: fuses the last writer of
/// `consumerOpOperand` (which must be a memref.subview) just before the
/// consumer. Returns the (producer, fused clone) pair or failure.
FailureOr<FusionInfo>
mlir::linalg::fuseProducerOfBuffer(OpBuilder &b, OpOperand &consumerOpOperand,
                                   const LinalgDependenceGraph &graph) {
  Optional<LinalgDependenceGraph::LinalgDependenceGraphElem> fusableDependence =
      findFusableProducer(consumerOpOperand, graph);
  if (!fusableDependence)
    return failure();

  LinalgOp producerOp = dyn_cast<LinalgOp>(fusableDependence->getDependentOp());
  if (!producerOp)
    return failure();

  // If producer is already in the same block as consumer, we are done.
  if (consumerOpOperand.get().getParentBlock() ==
      fusableDependence->getDependentValue().getParentBlock())
    return failure();

  Optional<AffineMap> producerMap =
      fusableDependence->getDependentOpViewIndexingMap();
  if (!producerMap)
    return failure();

  // Must be a subview or an extract_slice to guarantee there are loops we can
  // fuse into.
  auto subView = consumerOpOperand.get().getDefiningOp<memref::SubViewOp>();
  if (!subView) {
    LLVM_DEBUG(llvm::dbgs() << "\nNot fusable (not a subview)");
    return failure();
  }

  // Fuse `producer` just before `consumer`.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(consumerOpOperand.getOwner());
  LLVM_DEBUG(llvm::dbgs() << "Fuse into consumer: "
                          << *consumerOpOperand.getOwner() << "\n");

  auto fusedProducer = fuse(b, producerOp, *producerMap, consumerOpOperand);
  return FusionInfo{producerOp, fusedProducer};
}

/// Walk back use-def chain through scf::For yields.
/// Sets `opResult` to the producing LinalgOp result if one is found; leaves it
/// unset otherwise.

// TODO(ravishankarm, ntv): This can be moved into the dependence graphs
// dependence tracking since the dependence tracking is similar to what is done
// w.r.t to buffers.
static void getProducerOfTensor(Value tensor, OpResult &opResult) {
  if (!tensor.getType().isa<RankedTensorType>())
    return;

  while (true) {
    LLVM_DEBUG(llvm::dbgs() << "\ngetProducerOfTensor: " << tensor);
    if (auto linalgOp = tensor.getDefiningOp<LinalgOp>()) {
      opResult = tensor.cast<OpResult>();
      return;
    }
    // Look through tensor.extract_slice to its source.
    if (auto sliceOp = tensor.getDefiningOp<tensor::ExtractSliceOp>()) {
      tensor = sliceOp.getSource();
      continue;
    }
    if (auto blockArg = tensor.dyn_cast<BlockArgument>()) {
      // NOTE(review): `getDefiningOp` on a block argument always returns
      // null, so this branch appears to be dead; the intent (per the comment
      // above) is presumably to step from an scf.for iter_arg to the
      // corresponding init operand — verify and fix upstream.
      if (auto forOp = blockArg.getDefiningOp<scf::ForOp>()) {
        tensor = *(forOp.getIterOperands().begin() + blockArg.getArgNumber());
        continue;
      }
    }
    return;
  }
}

/// Tensor-semantics fusion entry point: finds the producing LinalgOp result
/// of `consumerOpOperand` (walking through extract_slice ops) and fuses it.
FailureOr<FusionInfo>
mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpOperand &consumerOpOperand) {
  Value inputTensor = consumerOpOperand.get();
  OpResult producerOpResult;
  getProducerOfTensor(inputTensor, producerOpResult);
  if (!producerOpResult) {
    LLVM_DEBUG(llvm::dbgs() << "\nUnable to find producer");
    return failure();
  }
  return fuseProducerOfTensor(b, producerOpResult, consumerOpOperand);
}

/// Fuses the op producing `producerOpResult` just before the owner of
/// `consumerOpOperand` (which must consume a tensor.extract_slice of it) and
/// rewires the consumer operand to the fused result.
FailureOr<FusionInfo>
mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult,
                                   OpOperand &consumerOpOperand) {
  auto producerOp = dyn_cast<LinalgOp>(producerOpResult.getOwner());
  if (!producerOp)
    return failure();

  LinalgOp consumerOp = dyn_cast<LinalgOp>(consumerOpOperand.getOwner());
  if (!consumerOp)
    return failure();

  Value inputTensor = consumerOpOperand.get();

  // Must be an extract_slice op to guarantee there are loops we can fuse into.
  auto sliceOp = inputTensor.getDefiningOp<tensor::ExtractSliceOp>();
  if (!sliceOp) {
    LLVM_DEBUG(llvm::dbgs()
               << "\nNot fusable, not an extract_slice op: " << inputTensor);
    return failure();
  }

  // If producer is already in the same block as consumer, we are done.
  if (consumerOpOperand.get().getParentBlock() ==
      producerOpResult.getParentBlock())
    return failure();

  // Insert fused `producer` just before `consumer`.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(consumerOp);
  LLVM_DEBUG(llvm::dbgs() << "Fuse into consumer: " << *consumerOp << "\n");
  OpOperand *opOperand =
      producerOp.getOutputOperand(producerOpResult.getResultNumber());
  LinalgOp fusedProducer =
      fuse(b, producerOp, producerOp.getTiedIndexingMap(opOperand),
           consumerOpOperand);

  // Replace use.
  // Canonicalizations are not guaranteed to have happened before constructing
  // `fusedProducer`. In the tensor case this can result in temporary type
  // mismatches. Insert a `tensor.cast` op to propagate the transformation
  // invariant that types are compatible.
  Value def = fusedProducer->getResult(producerOpResult.getResultNumber());
  Type consumerType = consumerOpOperand.get().getType();
  if (consumerType != def.getType())
    def = b.create<tensor::CastOp>(fusedProducer.getLoc(), consumerType, def);
  consumerOpOperand.set(def);
  return FusionInfo{cast<LinalgOp>(producerOpResult.getOwner()), fusedProducer};
}

/// Prune all dimensions that are of reduction iterator type from `map`.
/// Prune all dimensions that are of reduction iterator type from `map`, i.e.
/// project out every dimension whose iterator type is not parallel.
static AffineMap pruneReductionDimsFromMap(ArrayRef<Attribute> iteratorTypes,
                                           AffineMap map) {
  llvm::SmallBitVector projectedDims(iteratorTypes.size());
  for (const auto &attr : llvm::enumerate(iteratorTypes)) {
    if (!isParallelIterator(attr.value()))
      projectedDims.set(attr.index());
  }
  return getProjectedMap(map, projectedDims);
}

/// Returns the mapping from iterations in the consumer that write to the same
/// location as the iterations in the producer. To do so use
/// - indexing map of the fused view in the consumer : consumerIndexMap
/// - indexing map of the fused view in the producer : producerIndexMap
///     consumerLoopToProducerLoop =
///       inverse(producerIndexMap).compose(consumerIndexMap)
static FailureOr<AffineMap> getConsumerLoopToProducerLoopMap(
    LinalgDependenceGraph::LinalgDependenceGraphElem dependence) {
  auto producer = dyn_cast<LinalgOp>(dependence.getDependentOp());
  if (!producer)
    return failure();

  Optional<AffineMap> producerIndexingMap =
      dependence.getDependentOpViewIndexingMap();
  Optional<AffineMap> consumerIndexingMap =
      dependence.getIndexingOpViewIndexingMap();
  if (!producerIndexingMap || !consumerIndexingMap)
    return failure();

  // Only the parallel dimensions of the producer map can be inverted, so
  // project out its reduction dimensions first.
  AffineMap prunedProducerIndexingMap = pruneReductionDimsFromMap(
      producer.iterator_types().getValue(), *producerIndexingMap);
  if (!prunedProducerIndexingMap.isPermutation())
    return failure();

  if (consumerIndexingMap->getNumResults() !=
      prunedProducerIndexingMap.getNumResults())
    return failure();

  LLVM_DEBUG({
    llvm::dbgs() << "\t producerMap : ";
    producerIndexingMap->print(llvm::dbgs());
    llvm::dbgs() << "  pruned : ";
    prunedProducerIndexingMap.print(llvm::dbgs());
    llvm::dbgs() << "\n";
    llvm::dbgs() << "\t consumerMap : ";
    consumerIndexingMap->print(llvm::dbgs());
    llvm::dbgs() << "\n";
  });

  AffineMap invProducerIndexMap =
      inversePermutation(prunedProducerIndexingMap);
  if (!invProducerIndexMap)
    return failure();

  return invProducerIndexMap.compose(*consumerIndexingMap);
}

/// Given a projected permutation `map`, returns true if the map changes the
/// order in which the fused loop dimension appear.
static bool doesTransposeAccess(AffineMap map,
                                const std::set<unsigned> &fusableLoops) {
  // A transpose exists iff the fusable positions do not appear in strictly
  // increasing order among the map results.
  Optional<unsigned> lastFusableLoop;
  for (unsigned pos : llvm::map_range(map.getResults(), [](AffineExpr expr) {
         return expr.cast<AffineDimExpr>().getPosition();
       })) {
    if (!fusableLoops.count(pos))
      continue;
    if (!lastFusableLoop) {
      lastFusableLoop = pos;
      continue;
    }
    if (pos <= *lastFusableLoop)
      return true;
    lastFusableLoop = pos;
  }
  return false;
}

/// Returns the positions of the loop in `op` that can be tiled based on the
/// operations that are to be fused with it. For example, in a
///
///   linalg.matmul ins(%a, %b : ...) outs(%c : ...)
///
/// if the producer of %a needs to be fused with this op, only the `i` loop of
/// the matmul can be tiled while fusing. If producer of %a, and %b are to be
/// fused, then no loops can be tiled while fusing. The conditions used are:
/// 1. Only parallel loops can be used for tile + fuse. Find the number of
///    common outer parallel loops between the op and its producers being fused.
/// 2. Of the parallel loops only some can be fused. Only those loops can be
///    fused such where the fusable loops iteration space only touches one tile
///    of the fused operation. This is because the producer (which is writing
///    the fused subview) has update semantics.
///
/// Since an inverse computation is needed, we need to consider the projection
/// of the producerIndexMap w.r.t the parallel loops.  The actual fusable loops
/// are the dimensions of the consumerLoopToProducerLoop map that correspond to
/// parallel loops and appear in the result of the map
///
/// Example 1:
///   linalg.fill(%cst, %c)
///   linalg.matmul ins(%a, %b) outs(%c)
///
///   Number of parallel loops : 2
///   producerIndexMap = affine_map<(i, j) ->(i , j)>
///   consumerIndexMap = affine_map<(i, j, k) -> (i, j)>
///   consumerLoopToProducerLoop = affine_map<(i, j, k) -> (i, j)>
///   Fused dimensions : i, j
///
/// Example 2:
///   linalg.matmul ins(%a, %b) outs(%c)
///   linalg.generic {indexing_maps = [affine_map<(i, j) -> (j, i)>, ...
///                   iterator_types = ["parallel", "parallel"]}
///     ins(%c) ...
///
///   Number of parallel loops = 2:
///   producerIndexMap (projected to parallel loops) =
///     affine_map<(i, j) -> (i, j)>
///   consumerLoopToProducerLoop2 = affine_map<(i, j) -> (j, i)>
///   Fused dimensions : i, j
///
/// Example 3:
///   memref.copy(%s, %b)
///   linalg.matmul ins(%a, %b) outs(%c)
///
///   Number of parallel loops = 2
///   produceIndexMap : affine_map<(i, j) -> (i, j)>
///   consumerLoopToProduceLoops = affine_map<(i, j, k) -> (k, j)>
///     submap with only parallel loops = affine_map<(i, j) -> (j)>
///   Fused dimensions : j
static std::set<unsigned>
collectFusableLoops(ArrayRef<LinalgOp> ops,
                    const FusableOpDependencesTy &fusableDependences) {
  assert(!ops.empty());
  // Number of leading parallel iterators of `linalgOp`.
  auto getNumOuterParallelLoops = [](LinalgOp linalgOp) {
    return linalgOp.iterator_types()
        .getValue()
        .take_while([](Attribute attr) -> bool {
          return attr.cast<StringAttr>().getValue() ==
                 getParallelIteratorTypeName();
        })
        .size();
  };

  // Start from the outer parallel loops common to all ops; the root op is
  // `ops.back()`.
  size_t numOuterParallelLoops = getNumOuterParallelLoops(ops.back());
  for (auto op : ops.drop_back()) {
    numOuterParallelLoops =
        std::min(numOuterParallelLoops, getNumOuterParallelLoops(op));
  }

  std::set<unsigned> fusableLoops;
  auto range = llvm::seq<unsigned>(0, numOuterParallelLoops);
  fusableLoops.insert(range.begin(), range.end());

  // Iteratively intersect the candidate set with the loops supported by each
  // dependence, walking ops from the root backwards.
  for (auto op : reverse(ops)) {
    for (auto dependence : fusableDependences.lookup(op)) {
      LLVM_DEBUG({
        llvm::dbgs() << "\t fusable :";
        for (unsigned i : fusableLoops)
          llvm::dbgs() << " " << i;
        llvm::dbgs() << "\n";
      });

      Optional<AffineMap> consumerLoopToProducerLoop =
          getConsumerLoopToProducerLoopMap(dependence);
      if (!consumerLoopToProducerLoop) {
        op.emitRemark("failed to get map from consumer loop to producer loop");
        return {};
      }
      // TODO: This condition is only an implementation limitation. When fusing
      // the operation, if the accesses in the producer/consumer are transposes
      // of each other, the loop bounds for the tiled producer can be
      // manipulated accordingly. This requires some additional bookkeeping in
      // the implementation of tile+fuse that is deferred to later.
      if (doesTransposeAccess(*consumerLoopToProducerLoop, fusableLoops)) {
        op.emitRemark("unhandled fusion when fusion requires permutation");
        return {};
      }

      std::set<unsigned> candidates;
      for (AffineExpr expr : consumerLoopToProducerLoop->getResults()) {
        unsigned position = expr.cast<AffineDimExpr>().getPosition();
        if (fusableLoops.count(position))
          candidates.insert(position);
      }
      LLVM_DEBUG({
        llvm::dbgs() << "\t candidates :";
        for (unsigned i : candidates)
          llvm::dbgs() << " " << i;
        llvm::dbgs() << "\n";
      });
      if (candidates.empty())
        return {};
      std::swap(candidates, fusableLoops);
    }
  }

  return fusableLoops;
}

/// Find all dependences that are fusable.
/// Collects, for each producer op, the list of fusable dependences feeding
/// the ops in `ops` (processed in reverse). A dependence is kept only if the
/// producer is a LinalgOp in the same block and both producer and consumer
/// indexing maps are projected permutations. Producers used with conflicting
/// consumer indexing maps are dropped entirely (see TODO below).
FusableOpDependencesTy mlir::linalg::findAllFusableDependences(
    ArrayRef<LinalgOp> ops, const LinalgDependenceGraph &dependenceGraph) {
  FusableOpDependencesTy fusableDependences;
  // Tracks, per producer, every consumer-side indexing map seen, so that
  // producers reached through inconsistent maps can be rejected below.
  DenseMap<Operation *, SmallVector<AffineMap, 1>> fusedProducerIndexingMap;
  for (LinalgOp op : reverse(ops)) {
    for (OpOperand *opOperand : op.getInputAndOutputOperands()) {
      Optional<LinalgDependenceGraph::LinalgDependenceGraphElem>
          fusableDependence = findFusableProducer(*opOperand, dependenceGraph);
      if (!fusableDependence)
        continue;
      LinalgOp producerOp =
          dyn_cast<LinalgOp>(fusableDependence->getDependentOp());
      if (!producerOp)
        continue;
      // Do not fuse dependences that are to operations not in the same basic
      // block. This avoid moving fused operations across loops that might
      // themselves carry dependency making the fusion illegal.
      if (producerOp->getBlock() != op->getBlock())
        continue;

      // Make sure that the indexing map of the view used for fusion in the
      // producer is a projected permutation.
      Optional<AffineMap> producerMap =
          fusableDependence->getDependentOpViewIndexingMap();
      Optional<AffineMap> consumerMap =
          fusableDependence->getIndexingOpViewIndexingMap();
      assert(
          consumerMap &&
          "unable to find indexing map of operand/result of indexing OpView");
      fusedProducerIndexingMap[producerOp.getOperation()].push_back(
          *consumerMap);
      if (!producerMap || !producerMap->isProjectedPermutation() ||
          !consumerMap->isProjectedPermutation())
        continue;

      fusableDependences[producerOp.getOperation()].push_back(
          *fusableDependence);
    }
  }
  // TODO: Currently fusion would not be legal if the fusable dependence is to
  // the same producer but different indexing map in the consumer. Fix this, but
  // in the meanwhile disallow such a fusion.
  for (auto useIndexingMapsList : fusedProducerIndexingMap) {
    AffineMap map1 = useIndexingMapsList.second.front();
    for (AffineMap map2 :
         ArrayRef<AffineMap>(useIndexingMapsList.second).drop_front()) {
      if (map1 != map2) {
        fusableDependences.erase(useIndexingMapsList.first);
        break;
      }
    }
  }
  return fusableDependences;
}

/// Tile the fused loops in the root operation, by setting the tile sizes for
/// all other loops to zero (those will be tiled later).
static FailureOr<TiledLinalgOp>
tileRootOperation(OpBuilder &b, LinalgOp op, ArrayRef<Value> tileSizeVector,
                  const LinalgTilingOptions &options,
                  const std::set<unsigned> &fusedLoops) {
  SmallVector<Value, 4> tileSizes(tileSizeVector.begin(), tileSizeVector.end());
  auto zero = b.create<arith::ConstantIndexOp>(op.getLoc(), 0);
  // Tile size 0 means "do not tile this loop"; only fused loops keep their
  // requested tile size.
  for (unsigned i = 0, e = tileSizes.size(); i != e; ++i)
    if (!fusedLoops.count(i))
      tileSizes[i] = zero;
  LinalgTilingOptions tileFusedLoopsOptions = options;
  tileFusedLoopsOptions.setTileSizes(tileSizes);
  // TODO: Propagate RewriterBase everywhere.
  IRRewriter rewriter(b);
  return tileLinalgOp(rewriter, op, tileFusedLoopsOptions);
}

/// Fuse the operations in `fusionCandidates` with `tiledOp`. Latter is expected
/// to be a tiled operation such that it is valid to fuse all operations in
/// `fusionCandidates`, i.e. move the operation within the inter-tile loops of
/// `tiledOp`.
static SmallVector<LinalgOp, 1>
fuseOperations(OpBuilder &b, LinalgOp rootOp, TiledLinalgOp tiledLinalgOp,
               ArrayRef<LinalgOp> fusionCandidates,
               const FusableOpDependencesTy &fusableDependences,
               const std::set<unsigned> &fusedLoops) {
  LinalgOp tiledOp = tiledLinalgOp.op;
  OpBuilder::InsertionGuard guard(b);
  // Fused producers are created immediately before the tiled root operation,
  // i.e. inside the inter-tile loops produced by tiling.
  b.setInsertionPoint(tiledOp);

  // For every fused loop, recover its iteration range from the shape of an
  // operand of the tiled op that defines that loop (restricted to
  // subview/extract_slice-defined operands via the `true` argument).
  DenseMap<unsigned, Range> fusedLoopsAndRanges;
  for (unsigned loop : fusedLoops) {
    ShapeDimension shapeDim = getShapeDefiningLoopRange(tiledOp, loop, true);
    fusedLoopsAndRanges[loop] = getRangeFromOperandShape(
        b, tiledOp.getLoc(), shapeDim.shape, shapeDim.dimension);
  }

  // `fusedOps` mirrors the order of `fusionCandidates`; the candidates are
  // visited in reverse, hence the mirrored index when storing results below.
  SmallVector<LinalgOp, 1> fusedOps(fusionCandidates.size());
  // Map from an original (untiled) op to its tiled/fused clone, seeded with
  // the root so RAW dependences onto the root can be rewired too.
  DenseMap<Operation *, LinalgOp> origOpToFusedOp;
  origOpToFusedOp[rootOp.getOperation()] = tiledOp;
  for (const auto &candidate : enumerate(llvm::reverse(fusionCandidates))) {
    LinalgOp origOp = candidate.value();
    LinalgOp fusedOp = fuse(b, origOp, fusedLoopsAndRanges);
    origOpToFusedOp[origOp.getOperation()] = fusedOp;
    fusedOps[fusionCandidates.size() - candidate.index() - 1] = fusedOp;

    // Prepare the builder for the next insertion point: on every exit path of
    // this iteration (including the `continue`s below), move the insertion
    // point before the op just fused so the next candidate is created above it.
    auto guard = llvm::make_scope_exit([&]() { b.setInsertionPoint(fusedOp); });
    if (!origOp.hasTensorSemantics())
      continue;

    // If the producer consumer operations are linalg operations on tensors, the
    // dependence is due to value produced (as a return tensor) by the producer
    // and used in the consumer. The returned value of the fused op needs to be
    // made the operand of the tiled/fused consumer operation. By construction
    // the value returned by the producer is the value used by the consumer.
    for (auto &dependence : fusableDependences.lookup(origOp.getOperation())) {
      // Only read-after-write dependences carry a produced tensor to forward.
      if (dependence.dependenceType !=
          LinalgDependenceGraph::DependenceType::RAW)
        continue;

      unsigned resultIndex =
          dependence.getDependentOpViewResultNum().getValue();
      // The consumer may not have been fused (yet); skip it in that case.
      LinalgOp consumer = origOpToFusedOp.lookup(dependence.getIndexingOp());
      if (!consumer)
        continue;

      // Rewire the fused consumer to read the fused producer's result.
      Value replacementValue = fusedOp.getOperation()->getResult(resultIndex);
      consumer.getOperation()->setOperand(
          dependence.getIndexingOpViewOperandNum().getValue(),
          replacementValue);
    }

    // At this point, all Linalg uses of the tensors produced by `origOp` have
    // been replaced. However, there may still be "output tensor"-like uses
    // coming from WAW dependencies.
    // All these uses are iter_args of the outermost loop (TODO: add a check).
    // Such iter_args uses serve 2 purposes:
    // 1. give a shape to the output
    // 2. encode destructive updates that may be inplaceable by bufferization.
    // To keep the second type of information while letting the unfused op die
    // unused, we need to forward the producer output operand.
    if (auto forOp = dyn_cast<scf::ForOp>(tiledLinalgOp.loops.front())) {
      for (auto &operand : forOp.getIterOpOperands()) {
        if (auto opResult = operand.get().dyn_cast<OpResult>()) {
          if (opResult.getOwner() == origOp) {
            // Forward the producer's own output operand into the iter_arg so
            // the producer result becomes dead.
            Value output =
                origOp.getOutputOperand(opResult.getResultNumber())->get();
            assert(output.getType().isa<RankedTensorType>());
            operand.set(output);
          }
        }
      }
    }
  }
  return fusedOps;
}

/// Implementation of tile-and-fuse: tiles the root (last) operation in `ops`
/// along the fusable loop dimensions, then fuses the remaining operations into
/// the resulting inter-tile loops. Fails (with diagnostics) on mixed
/// buffer/tensor semantics, interchange requests, or when nothing is fusable.
static FailureOr<TiledAndFusedLinalgOps>
tileAndFuseLinalgOpsImpl(OpBuilder &b, ArrayRef<LinalgOp> ops,
                         const LinalgDependenceGraph &dependenceGraph,
                         const LinalgTilingOptions &tilingOptions) {
  // Need at least one producer in addition to the root consumer.
  if (ops.size() < 2)
    return failure();
  LinalgOp rootOp = ops.back();
  // All ops must uniformly have buffer semantics or uniformly have tensor
  // semantics; mixing the two is rejected.
  if (!llvm::all_of(
          ops,
          [](LinalgOp linalgOp) { return linalgOp.hasBufferSemantics(); }) &&
      !llvm::all_of(ops, [](LinalgOp linalgOp) {
        return linalgOp.hasTensorSemantics();
      })) {
    rootOp.emitError(
        "unable to fuse operations that have tensor semantics with operations "
        "that have buffer semantics and viceversa.");
    return failure();
  }
  // TODO: Support interchange with tile + fuse. This might actually help do
  // better fusion.
  if (!tilingOptions.interchangeVector.empty()) {
    rootOp.emitRemark("unable to handle tile and fuse with interchange");
    return failure();
  }

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPoint(rootOp);

  // Find all the producers.
  LLVM_DEBUG(llvm::dbgs() << "findAllFusableDependences\n");
  FusableOpDependencesTy fusableDependences =
      findAllFusableDependences(ops, dependenceGraph);
  if (fusableDependences.empty()) {
    LLVM_DEBUG(llvm::dbgs() << "no fusable dependencies found\n");
    return failure();
  }

  TiledAndFusedLinalgOps ret;
  // Find the loops that can be tiled and fused.
  LLVM_DEBUG(llvm::dbgs() << "collectFusableLoops\n");
  ret.fusedLoopDims = collectFusableLoops(ops, fusableDependences);

  // If there are no fusable dependences or there are no tile+fusable loops,
  // just return.
  if (ret.fusedLoopDims.empty()) {
    LLVM_DEBUG(llvm::dbgs() << "no fusable loops found\n");
    return failure();
  }

  // Tile the fused loops in the last operation in the list. Only the fusable
  // loop dimensions are tiled here; the remaining loops get a zero tile size
  // inside `tileRootOperation` and are left for later.
  SmallVector<Value, 4> tileSizeVector =
      tilingOptions.tileSizeComputationFunction(b, rootOp);
  FailureOr<TiledLinalgOp> tiledRootOp = tileRootOperation(
      b, rootOp, tileSizeVector, tilingOptions, ret.fusedLoopDims);
  if (failed(tiledRootOp)) {
    rootOp.emitRemark("failed to tile the fused loops");
    return failure();
  }
  ret.op = tiledRootOp->op;
  ret.fusedLoops.assign(tiledRootOp->loops.begin(), tiledRootOp->loops.end());

  // Fuse the other operations into the fused inter-tile loops produced above.
  // `ops.drop_back()` excludes the root, which was tiled rather than fused.
  ret.fusedProducers = fuseOperations(b, rootOp, *tiledRootOp, ops.drop_back(),
                                      fusableDependences, ret.fusedLoopDims);

  return ret;
}

// Public entry point: dispatch on the requested inter-tile loop type.
FailureOr<TiledAndFusedLinalgOps>
mlir::linalg::tileAndFuseLinalgOps(OpBuilder &b, ArrayRef<LinalgOp> ops,
                                   const LinalgDependenceGraph &dependenceGraph,
                                   const LinalgTilingOptions &tilingOptions) {
  switch (tilingOptions.loopType) {
  // Only these three loop types are handled; any other enum value falls
  // through to the failure below.
  case LinalgTilingLoopType::Loops:
  case LinalgTilingLoopType::ParallelLoops:
  case LinalgTilingLoopType::TiledLoops:
    return tileAndFuseLinalgOpsImpl(b, ops, dependenceGraph, tilingOptions);
  default:;
  }
  return failure();
}