//===- Fusion.cpp - Implementation of linalg Fusion -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Fusion pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Dominance.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

#include <set>

#define DEBUG_TYPE "linalg-fusion"

using namespace mlir;
using namespace mlir::linalg;

/// Implements a simple high-level fusion pass on linalg structured operations.
///
/// In each block, linalg ops are processed in reverse textual order.
/// Given a linalg op `O`, fusion occurs by:
///   1. inspecting the linalg ops that write into the views read by `O`. There
///      are 2 cases:
///      a) buffer case: use the SSA value of the views and a simple alias
///         analysis on subview ops to determine producer-consumer dependences;
///      b) tensor case: use SSA use-def chains on extract_slice ops;
///   2. greedily fusing the linalg ops that produce the subview/extract_slice;
///   3. inspecting the fused ops and determining whether they have other
///      remaining LinalgOp uses. If not, then erase the original producing
///      linalg op.
///
/// More advanced use cases, analyses as well as profitability heuristics are
/// left for future work.

struct ShapeDimension {
  Value shape;
  unsigned dimension;
};

// Given an `op`, returns the first (`shape`, `dimension`) pair that identifies
// the loop range at `loopDepth`. The semantics of the loopToOperandRangesMaps
// guarantees at least one such dimension is found. If multiple candidates
// exist, they must agree by construction (i.e. have the same size) and we just
// return the first one.
static ShapeDimension
getShapeDefiningLoopRange(LinalgOp op, unsigned loopDepth,
                          bool fromSubViewOpOnly = false) {
  // Iterate over the inputs and outputs in order.
  // Extract the subranges from the linearized ranges.
  for (OpOperand *opOperand : op.getInputAndOutputOperands()) {
    // The method `getRangeFromOperandShape` requires using SubViewOp or
    // ExtractSliceOps. If the value is not defined by one of these ops,
    // continue.
    // TODO: The method should be adapted to get the values from
    // `ViewInterface`. The interface needs a `getOrCreateRanges` method, which
    // currently returns a `linalg.range`. The fix here is to move this op to
    // the `std` dialect and add the method to `ViewInterface`.
    if (fromSubViewOpOnly &&
        !isa_and_nonnull<memref::SubViewOp, tensor::ExtractSliceOp>(
            opOperand->get().getDefiningOp()))
      continue;

    AffineMap map = op.getTiedIndexingMap(opOperand);
    LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange I/O idx: "
                            << opOperand->getOperandNumber() << "\n");
    LLVM_DEBUG(llvm::dbgs()
               << "getShapeDefiningLoopRange map: " << map << "\n");
    SmallVector<Value, 8> shapeRanges(map.getNumResults(), nullptr);
    for (const auto &en : llvm::enumerate(map.getResults())) {
      auto dimExpr = en.value().dyn_cast<AffineDimExpr>();
      if (!dimExpr)
        continue;
      if (loopDepth == dimExpr.getPosition()) {
        LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange loopDepth: "
                                << loopDepth << "\n");
        LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange shape: "
                                << opOperand->get() << "\n");
        return ShapeDimension{opOperand->get(),
                              static_cast<unsigned>(en.index())};
      }
    }
  }
  llvm_unreachable("Expect to be able to extract a shape defining loop range");
}

// Return all input and output operands of `producer` as values.
static SmallVector<Value> getTiledOperands(LinalgOp producer) {
  SmallVector<Value> operands;
  for (OpOperand *opOperand : producer.getInputAndOutputOperands())
    operands.push_back(opOperand->get());
  return operands;
}

/// Fuses the producer by cloning the `producer`. The `fusedLoopsAndRanges`
/// provides the loop range information for the fused loops. The rest are
/// obtained from the producer itself, since they are not tiled + fused.
static LinalgOp fuse(OpBuilder &b, LinalgOp producer,
                     const DenseMap<unsigned, Range> &fusedLoopsAndRanges) {
  SmallVector<Value, 8> ivs, tileSizes, sizeBounds;
  SmallVector<Range, 8> loopRanges;
  Location loc = producer.getLoc();
  auto zero = b.create<arith::ConstantIndexOp>(loc, 0);
  auto one = b.create<arith::ConstantIndexOp>(loc, 1);

  for (unsigned i = 0, e = producer.getNumLoops(); i < e; ++i) {
    auto shapeDim = getShapeDefiningLoopRange(producer, i);
    Value dim = createOrFoldDimOp(b, loc, shapeDim.shape, shapeDim.dimension);
    sizeBounds.push_back(dim);
    auto it = fusedLoopsAndRanges.find(i);
    if (it != fusedLoopsAndRanges.end()) {
      ivs.push_back(it->second.offset);
      tileSizes.push_back(it->second.size);
      loopRanges.push_back(it->second);
      LLVM_DEBUG(llvm::dbgs() << "tiled loop#" << i << " with LoopRange "
                              << loopRanges.back() << "\n");
    } else {
      tileSizes.push_back(zero);
      loopRanges.push_back(Range{zero, dim, one});
      LLVM_DEBUG(llvm::dbgs() << "full loop#" << i << " with LoopRange "
                              << loopRanges.back() << "\n");
    }
  }

  SmallVector<Value, 8> clonedShapes;
  clonedShapes.reserve(producer.getNumInputsAndOutputs());

  // Compute subranges for all tensor input/output operands.
  clonedShapes.append(makeTiledShapes(b, loc, producer,
                                      getTiledOperands(producer), ivs,
                                      tileSizes, sizeBounds));

  // Iterate over the results in order.
  // Extract the subtensor type from the linearized range.
  // Since we do not enforce any canonicalizations on the fly, this is always
  // fully dynamic at construction time.
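  // For example (illustrative only): even if a producer result has the static
  // type tensor<4x8xf32>, the slice type inferred below is the fully dynamic
  // tensor<?x?xf32>; callers such as fuseProducerOfTensor reconcile such
  // temporary mismatches by inserting a tensor.cast.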
  SmallVector<Type, 4> resultTypes;
  resultTypes.reserve(producer->getNumResults());
  for (RankedTensorType t : producer.getOutputTensorTypes()) {
    unsigned rank = t.getRank();
    SmallVector<int64_t, 4> staticOffsetsVector(
        rank, ShapedType::kDynamicStrideOrOffset);
    SmallVector<int64_t, 4> staticSizesVector(rank, ShapedType::kDynamicSize);
    SmallVector<int64_t, 4> staticStridesVector(
        rank, ShapedType::kDynamicStrideOrOffset);
    resultTypes.push_back(tensor::ExtractSliceOp::inferResultType(
        t, staticOffsetsVector, staticSizesVector, staticStridesVector));
  }

  Operation *clonedOp = producer.clone(b, loc, resultTypes, clonedShapes);

  // Shift all IndexOp results by the tile offset.
  SmallVector<Value> allIvs;
  transform(loopRanges, std::back_inserter(allIvs),
            [](Range range) { return range.offset; });
  addTileLoopIvsToIndexOpResults(b, clonedOp, allIvs);

  return clonedOp;
}

/// Get the loop range for a dimension `dim` based on the `shapedOperand`. It
/// is expected to be defined by a subview op or an extract_slice op.
static Range getRangeFromOperandShape(OpBuilder &b, Location loc,
                                      Value shapedOperand, unsigned dim) {
  Operation *shapeProducingOp = shapedOperand.getDefiningOp();
  if (auto subViewOp = dyn_cast<memref::SubViewOp>(shapeProducingOp))
    return subViewOp.getOrCreateRanges(b, loc)[dim];
  if (auto sliceOp = dyn_cast<tensor::ExtractSliceOp>(shapeProducingOp))
    return sliceOp.getOrCreateRanges(b, loc)[dim];
  llvm_unreachable("SubviewOp or ExtractSliceOp expected");
}

/// Fuses the producer into the loop immediately enclosing the consumer.
/// This is achieved by "recomputing" the producer at the time it
/// is needed just before the consumer.
static LinalgOp fuse(OpBuilder &b, LinalgOp producerOp, AffineMap producerMap,
                     OpOperand &consumerOpOperand) {
  LLVM_DEBUG(llvm::dbgs() << "Producer map: " << producerMap << "\n");
  DenseMap<unsigned, Range> fusedLoopsAndRanges;
  Value shapedOperand = consumerOpOperand.get();
  for (const auto &en : llvm::enumerate(producerMap.getResults())) {
    unsigned posInProducerLoop = en.value().cast<AffineDimExpr>().getPosition();
    fusedLoopsAndRanges[posInProducerLoop] = getRangeFromOperandShape(
        b, consumerOpOperand.getOwner()->getLoc(), shapedOperand, en.index());
  }
  return fuse(b, producerOp, fusedLoopsAndRanges);
}

// Encode structural fusion safety preconditions.
// Some of these will be lifted in the future with better analysis.
static bool isStructurallyFusableProducer(LinalgOp producer, Value consumedView,
                                          LinalgOp consumer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  if (producer.getNumOutputs() != 1) {
    LLVM_DEBUG(llvm::dbgs() << "\nNot structurally fusable (multi-output)");
    return false;
  }
  // Only fuse when the producer block dominates.
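  // Rationale (informal): fusion recomputes the producer just before the
  // consumer, so the producer's block must be visible from the consumer's
  // position; block dominance is the conservative structural proxy used for
  // that here.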
  DominanceInfo dom(producer.getOperation());
  if (!dom.dominates(producer->getBlock(), consumer->getBlock())) {
    LLVM_DEBUG(
        llvm::dbgs()
        << "\nNot structurally fusable (producer block does not dominate)");
    return false;
  }
  return true;
}

bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
                                             LinalgOp consumer,
                                             Value consumedView,
                                             LinalgOp producer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  // Make some simple structural checks that alleviate the need for more
  // complex analyses.
  if (!isStructurallyFusableProducer(producer, consumedView, consumer)) {
    LLVM_DEBUG(llvm::dbgs() << "\n***Not static last write due to structure:\t"
                            << *producer.getOperation());
    return false;
  }
  // Check for any interleaved write to consumedView.
  if (!graph.findCoveringWrites(producer, consumer, consumedView).empty()) {
    LLVM_DEBUG(llvm::dbgs() << "\n***Not fusable due to interleaved write:\t"
                            << *producer.getOperation());
    return false;
  }
  return true;
}

bool mlir::linalg::isFusableInto(const LinalgDependenceGraph &graph,
                                 LinalgOp consumer, Value consumedView,
                                 LinalgOp producer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  if (!isProducerLastWriteOfView(graph, consumer, consumedView, producer))
    return false;
  // Check for any fusion-preventing dependence to any shape read/written that
  // would violate dependences.
  if (!graph.findCoveringDependences(producer, consumer).empty()) {
    LLVM_DEBUG(llvm::dbgs()
               << "\n***Not fusable due to an interleaved dependence:\t"
               << *producer.getOperation());
    return false;
  }
  return true;
}

/// For `consumer` with buffer semantics, find the Linalg operation on buffers
/// that is the last writer of `consumerOpOperand`. For now the fusable
/// dependence is returned as an instance of the `dependenceGraph`.
static FailureOr<LinalgDependenceGraph::LinalgDependenceGraphElem>
findFusableProducer(OpOperand &consumerOpOperand,
                    const LinalgDependenceGraph &dependenceGraph) {
  LLVM_DEBUG(llvm::dbgs() << "findFusableProducer for: "
                          << consumerOpOperand.get() << " @"
                          << consumerOpOperand.getOperandNumber() << " in "
                          << *consumerOpOperand.getOwner() << "\n");
  LinalgOp consumerOp = dyn_cast<LinalgOp>(consumerOpOperand.getOwner());
  if (!consumerOp)
    return failure();

  // Only consider RAW and WAW dependences atm.
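  // Informally: RAW is the usual producer-consumer relation (the producer
  // writes a view that the consumer reads), while WAW covers a producer whose
  // written view is overwritten again by the consumer. RAR and WAR dependences
  // are not considered for fusion here.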
  for (auto depType : {
           LinalgDependenceGraph::DependenceType::RAW,
           LinalgDependenceGraph::DependenceType::WAW,
       }) {
    LLVM_DEBUG(llvm::dbgs()
               << "Dependencies into: " << *consumerOp.getOperation() << "\n");
    for (auto dependence : llvm::make_filter_range(
             dependenceGraph.getDependencesInto(consumerOp, depType),
             [&](LinalgDependenceGraph::LinalgDependenceGraphElem elem) {
               LLVM_DEBUG(llvm::dbgs() << "Inspect dependence btw: "
                                       << elem.getIndexingValue() << " and "
                                       << elem.getDependentValue() << "\n");
               Value v = elem.getIndexingValue();
               Optional<unsigned> operandNum =
                   elem.getIndexingOpViewOperandNum();
               return isa<LinalgOp>(elem.getDependentOp()) &&
                      v == consumerOpOperand.get() && operandNum &&
                      operandNum.getValue() ==
                          consumerOpOperand.getOperandNumber();
             })) {
      // Consumer consumes this view, `isStructurallyFusableProducer` also
      // checks whether it is a strict subview of the producer view.
      auto producer = cast<LinalgOp>(dependence.getDependentOp());
      LLVM_DEBUG(llvm::dbgs()
                 << "\n"
                 << LinalgDependenceGraph::getDependenceTypeStr(depType)
                 << "producer: " << *dependence.getDependentOp()
                 << " view: " << dependence.getDependentValue() << "\n");

      // If the producer and consumer have tensor semantics, the only
      // dependence between them is through a RAW dependence and they are
      // fusable by construction. For buffer semantics, additional checks are
      // needed.
      if (producer.hasBufferSemantics() && consumerOp.hasBufferSemantics() &&
          isFusableInto(dependenceGraph, consumerOp, consumerOpOperand.get(),
                        producer))
        return dependence;
      if (producer.hasTensorSemantics() && consumerOp.hasTensorSemantics()) {
        assert(dependence.dependenceType ==
               LinalgDependenceGraph::DependenceType::RAW);
        return dependence;
      }
    }
  }
  return failure();
}

FailureOr<FusionInfo>
mlir::linalg::fuseProducerOfBuffer(OpBuilder &b, OpOperand &consumerOpOperand,
                                   const LinalgDependenceGraph &graph) {
  FailureOr<LinalgDependenceGraph::LinalgDependenceGraphElem>
      fusableDependence = findFusableProducer(consumerOpOperand, graph);
  if (failed(fusableDependence))
    return failure();

  LinalgOp producerOp =
      dyn_cast<LinalgOp>(fusableDependence->getDependentOp());
  if (!producerOp)
    return failure();

  // If producer is already in the same block as consumer, we are done.
  if (consumerOpOperand.get().getParentBlock() ==
      fusableDependence->getDependentValue().getParentBlock())
    return failure();

  Optional<AffineMap> producerMap =
      fusableDependence->getDependentOpViewIndexingMap();
  if (!producerMap)
    return failure();

  // Must be a subview or an extract_slice to guarantee there are loops we can
  // fuse into.
  auto subView = consumerOpOperand.get().getDefiningOp<memref::SubViewOp>();
  if (!subView) {
    LLVM_DEBUG(llvm::dbgs() << "\nNot fusable (not a subview)");
    return failure();
  }

  // Fuse `producer` just before `consumer`.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(consumerOpOperand.getOwner());
  LLVM_DEBUG(llvm::dbgs() << "Fuse into consumer: "
                          << *consumerOpOperand.getOwner() << "\n");

  auto fusedProducer = fuse(b, producerOp, *producerMap, consumerOpOperand);
  return FusionInfo{producerOp, fusedProducer};
}

/// Walk back use-def chain through scf::For yields.
/// Sets `opResult` to the producer result if a producer LinalgOp is found.

// TODO(ravishankarm, ntv): This can be moved into the dependence graphs
// dependence tracking since the dependence tracking is similar to what is done
// w.r.t. buffers.
static void getProducerOfTensor(Value tensor, OpResult &opResult) {
  if (!tensor.getType().isa<RankedTensorType>())
    return;

  while (true) {
    LLVM_DEBUG(llvm::dbgs() << "\ngetProducerOfTensor: " << tensor);
    if (auto linalgOp = tensor.getDefiningOp<LinalgOp>()) {
      opResult = tensor.cast<OpResult>();
      return;
    }
    if (auto sliceOp = tensor.getDefiningOp<tensor::ExtractSliceOp>()) {
      tensor = sliceOp.source();
      continue;
    }
    if (auto blockArg = tensor.dyn_cast<BlockArgument>()) {
      if (auto forOp =
              dyn_cast<scf::ForOp>(blockArg.getOwner()->getParentOp())) {
        // The first block argument of an scf.for is the induction variable;
        // map the remaining region iter_args back to the corresponding iter
        // operands of the loop.
        tensor =
            *(forOp.getIterOperands().begin() + blockArg.getArgNumber() - 1);
        continue;
      }
    }
    return;
  }
}

FailureOr<FusionInfo>
mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpOperand &consumerOpOperand) {
  Value inputTensor = consumerOpOperand.get();
  OpResult producerOpResult;
  getProducerOfTensor(inputTensor, producerOpResult);
  if (!producerOpResult) {
    LLVM_DEBUG(llvm::dbgs() << "\nUnable to find producer");
    return failure();
  }
  return fuseProducerOfTensor(b, producerOpResult, consumerOpOperand);
}

FailureOr<FusionInfo>
mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult,
                                   OpOperand &consumerOpOperand) {
  auto producerOp = dyn_cast<LinalgOp>(producerOpResult.getOwner());
  if (!producerOp)
    return failure();

  LinalgOp consumerOp = dyn_cast<LinalgOp>(consumerOpOperand.getOwner());
  if (!consumerOp)
    return failure();

  Value inputTensor = consumerOpOperand.get();

  // Must be an extract_slice op to guarantee there are loops we can fuse into.
  auto sliceOp = inputTensor.getDefiningOp<tensor::ExtractSliceOp>();
  if (!sliceOp) {
    LLVM_DEBUG(llvm::dbgs()
               << "\nNot fusable, not an extract_slice op: " << inputTensor);
    return failure();
  }

  // If producer is already in the same block as consumer, we are done.
  if (consumerOpOperand.get().getParentBlock() ==
      producerOpResult.getParentBlock())
    return failure();

  // Insert fused `producer` just before `consumer`.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(consumerOp);
  LLVM_DEBUG(llvm::dbgs() << "Fuse into consumer: " << *consumerOp << "\n");
  OpOperand *opOperand =
      producerOp.getOutputOperand(producerOpResult.getResultNumber());
  LinalgOp fusedProducer =
      fuse(b, producerOp, producerOp.getTiedIndexingMap(opOperand),
           consumerOpOperand);

  // Replace use.
  // Canonicalizations are not guaranteed to have happened before constructing
  // `fusedProducer`. In the tensor case this can result in temporary type
  // mismatches. Insert a `tensor.cast` op to propagate the transformation
  // invariant that types are compatible.
  Value def = fusedProducer->getResult(producerOpResult.getResultNumber());
  Type consumerType = consumerOpOperand.get().getType();
  if (consumerType != def.getType())
    def = b.create<tensor::CastOp>(fusedProducer.getLoc(), consumerType, def);
  consumerOpOperand.set(def);
  return FusionInfo{cast<LinalgOp>(producerOpResult.getOwner()), fusedProducer};
}

/// Prune all dimensions that are of reduction iterator type from `map`.
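/// For example (illustrative, assuming a matmul-like producer): with
/// iterator_types = ["parallel", "parallel", "reduction"] and output indexing
/// map `(d0, d1, d2) -> (d0, d1)`, pruning the reduction dimension yields
/// `(d0, d1) -> (d0, d1)`, a permutation that the caller can then invert.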
static AffineMap pruneReductionDimsFromMap(ArrayRef<Attribute> iteratorTypes,
                                           AffineMap map) {
  llvm::SmallBitVector projectedDims(iteratorTypes.size());
  for (const auto &attr : llvm::enumerate(iteratorTypes)) {
    if (!isParallelIterator(attr.value()))
      projectedDims.set(attr.index());
  }
  return getProjectedMap(map, projectedDims);
}

/// Returns the mapping from iterations in the consumer that write to the same
/// location as the iterations in the producer. To do so, use:
///   - indexing map of the fused view in the consumer : consumerIndexMap
///   - indexing map of the fused view in the producer : producerIndexMap
///     consumerLoopToProducerLoop =
///         inverse(producerIndexMap).compose(consumerIndexMap)
static FailureOr<AffineMap> getConsumerLoopToProducerLoopMap(
    LinalgDependenceGraph::LinalgDependenceGraphElem dependence) {
  auto producer = dyn_cast<LinalgOp>(dependence.getDependentOp());
  if (!producer)
    return failure();

  Optional<AffineMap> producerIndexingMap =
      dependence.getDependentOpViewIndexingMap();
  Optional<AffineMap> consumerIndexingMap =
      dependence.getIndexingOpViewIndexingMap();
  if (!producerIndexingMap || !consumerIndexingMap)
    return failure();

  AffineMap prunedProducerIndexingMap = pruneReductionDimsFromMap(
      producer.iterator_types().getValue(), *producerIndexingMap);
  if (!prunedProducerIndexingMap.isPermutation())
    return failure();

  if (consumerIndexingMap->getNumResults() !=
      prunedProducerIndexingMap.getNumResults())
    return failure();

  LLVM_DEBUG({
    llvm::dbgs() << "\t producerMap : ";
    producerIndexingMap->print(llvm::dbgs());
    llvm::dbgs() << "  pruned : ";
    prunedProducerIndexingMap.print(llvm::dbgs());
    llvm::dbgs() << "\n";
    llvm::dbgs() << "\t consumerMap : ";
    consumerIndexingMap->print(llvm::dbgs());
    llvm::dbgs() << "\n";
  });

  AffineMap invProducerIndexMap = inversePermutation(prunedProducerIndexingMap);
  if (!invProducerIndexMap)
    return failure();

  return invProducerIndexMap.compose(*consumerIndexingMap);
}

/// Given a projected permutation `map`, returns true if the map changes the
/// order in which the fused loop dimensions appear.
static bool doesTransposeAccess(AffineMap map,
                                const std::set<unsigned> &fusableLoops) {
  Optional<unsigned> lastFusableLoop;
  for (unsigned pos : llvm::map_range(map.getResults(), [](AffineExpr expr) {
         return expr.cast<AffineDimExpr>().getPosition();
       })) {
    if (!fusableLoops.count(pos))
      continue;
    if (!lastFusableLoop) {
      lastFusableLoop = pos;
      continue;
    }
    if (pos <= lastFusableLoop.getValue())
      return true;
    lastFusableLoop = pos;
  }
  return false;
}

/// Returns the positions of the loops in `op` that can be tiled based on the
/// operations that are to be fused with it. For example, in a
///
///   linalg.matmul ins(%a, %b : ...) outs(%c : ...)
///
/// if the producer of %a needs to be fused with this op, only the `i` loop of
/// the matmul can be tiled while fusing. If the producers of both %a and %b
/// are to be fused, then no loops can be tiled while fusing. The conditions
/// used are:
///   1. Only parallel loops can be used for tile + fuse. Find the number of
///      common outer parallel loops between the op and its producers being
///      fused.
///   2. Of the parallel loops, only some can be fused. Only those loops can be
///      fused for which the fusable loops' iteration space touches only one
///      tile of the fused operation. This is because the producer (which is
///      writing the fused subview) has update semantics.
///
/// Since an inverse computation is needed, we need to consider the projection
/// of the producerIndexMap w.r.t. the parallel loops. The actual fusable loops
/// are the dimensions of the consumerLoopToProducerLoop map that correspond to
/// parallel loops and appear in the result of the map.
///
/// Example 1:
///   linalg.fill(%cst, %c)
///   linalg.matmul ins(%a, %b) outs(%c)
///
///   Number of parallel loops : 2
///   producerIndexMap = affine_map<(i, j) -> (i, j)>
///   consumerIndexMap = affine_map<(i, j, k) -> (i, j)>
///   consumerLoopToProducerLoop = affine_map<(i, j, k) -> (i, j)>
///   Fused dimensions : i, j
///
/// Example 2:
///   linalg.matmul ins(%a, %b) outs(%c)
///   linalg.generic {indexing_maps = [affine_map<(i, j) -> (j, i)>, ...
///                   iterator_types = ["parallel", "parallel"]}
///     ins(%c) ...
///
///   Number of parallel loops : 2
///   producerIndexMap (projected to parallel loops) =
///       affine_map<(i, j) -> (i, j)>
///   consumerLoopToProducerLoop = affine_map<(i, j) -> (j, i)>
///   Fused dimensions : i, j
///
/// Example 3:
///   memref.copy(%s, %b)
///   linalg.matmul ins(%a, %b) outs(%c)
///
///   Number of parallel loops : 2
///   producerIndexMap = affine_map<(i, j) -> (i, j)>
///   consumerLoopToProducerLoop = affine_map<(i, j, k) -> (k, j)>
///   submap with only parallel loops = affine_map<(i, j) -> (j)>
///   Fused dimensions : j
static std::set<unsigned>
collectFusableLoops(ArrayRef<LinalgOp> ops,
                    const FusableOpDependencesTy &fusableDependences) {
  assert(!ops.empty());
  auto getNumOuterParallelLoops = [](LinalgOp linalgOp) {
    return linalgOp.iterator_types()
        .getValue()
        .take_while([](Attribute attr) -> bool {
          return attr.cast<StringAttr>().getValue() ==
                 getParallelIteratorTypeName();
        })
        .size();
  };

  size_t numOuterParallelLoops = getNumOuterParallelLoops(ops.back());
  for (auto op : ops.drop_back()) {
    numOuterParallelLoops =
        std::min(numOuterParallelLoops, getNumOuterParallelLoops(op));
  }

  std::set<unsigned> fusableLoops;
  auto range = llvm::seq<unsigned>(0, numOuterParallelLoops);
  fusableLoops.insert(range.begin(), range.end());

  for (auto op : reverse(ops)) {
    for (auto dependence : fusableDependences.lookup(op)) {
      LLVM_DEBUG({
        llvm::dbgs() << "\t fusable :";
        for (unsigned i : fusableLoops)
          llvm::dbgs() << " " << i;
        llvm::dbgs() << "\n";
      });

      FailureOr<AffineMap> consumerLoopToProducerLoop =
          getConsumerLoopToProducerLoopMap(dependence);
      if (failed(consumerLoopToProducerLoop)) {
        op.emitRemark("failed to get map from consumer loop to producer loop");
        return {};
      }
      // TODO: This condition is only an implementation limitation. When fusing
      // the operation, if the accesses in the producer/consumer are transposes
      // of each other, the loop bounds for the tiled producer can be
      // manipulated accordingly. This requires some additional bookkeeping in
      // the implementation of tile+fuse that is deferred to later.
      if (doesTransposeAccess(*consumerLoopToProducerLoop, fusableLoops)) {
        op.emitRemark("unhandled fusion when fusion requires permutation");
        return {};
      }

      std::set<unsigned> candidates;
      for (AffineExpr expr : consumerLoopToProducerLoop->getResults()) {
        unsigned position = expr.cast<AffineDimExpr>().getPosition();
        if (fusableLoops.count(position))
          candidates.insert(position);
      }
      LLVM_DEBUG({
        llvm::dbgs() << "\t candidates :";
        for (unsigned i : candidates)
          llvm::dbgs() << " " << i;
        llvm::dbgs() << "\n";
      });
      if (candidates.empty())
        return {};
      std::swap(candidates, fusableLoops);
    }
  }

  return fusableLoops;
}

/// Find all dependences that are fusable.
FusableOpDependencesTy mlir::linalg::findAllFusableDependences(
    ArrayRef<LinalgOp> ops, const LinalgDependenceGraph &dependenceGraph) {
  FusableOpDependencesTy fusableDependences;
  DenseMap<Operation *, SmallVector<AffineMap, 1>> fusedProducerIndexingMap;
  for (LinalgOp op : reverse(ops)) {
    for (OpOperand *opOperand : op.getInputAndOutputOperands()) {
      FailureOr<LinalgDependenceGraph::LinalgDependenceGraphElem>
          fusableDependence = findFusableProducer(*opOperand, dependenceGraph);
      if (failed(fusableDependence))
        continue;
      LinalgOp producerOp =
          dyn_cast<LinalgOp>(fusableDependence->getDependentOp());
      if (!producerOp)
        continue;
      // Do not fuse dependences that are to operations not in the same basic
      // block. This avoids moving fused operations across loops that might
      // themselves carry dependences, which would make the fusion illegal.
      if (producerOp->getBlock() != op->getBlock())
        continue;

      // Make sure that the indexing map of the view used for fusion in the
      // producer is a projected permutation.
      Optional<AffineMap> producerMap =
          fusableDependence->getDependentOpViewIndexingMap();
      Optional<AffineMap> consumerMap =
          fusableDependence->getIndexingOpViewIndexingMap();
      assert(
          consumerMap &&
          "unable to find indexing map of operand/result of indexing OpView");
      fusedProducerIndexingMap[producerOp.getOperation()].push_back(
          *consumerMap);
      if (!producerMap || !producerMap->isProjectedPermutation() ||
          !consumerMap->isProjectedPermutation())
        continue;

      fusableDependences[producerOp.getOperation()].push_back(
          *fusableDependence);
    }
  }
  // TODO: Currently fusion would not be legal if the fusable dependence is to
  // the same producer but a different indexing map in the consumer. Fix this,
  // but in the meantime disallow such a fusion.
  for (const auto &useIndexingMapsList : fusedProducerIndexingMap) {
    AffineMap map1 = useIndexingMapsList.second.front();
    for (AffineMap map2 :
         ArrayRef<AffineMap>(useIndexingMapsList.second).drop_front()) {
      if (map1 != map2) {
        fusableDependences.erase(useIndexingMapsList.first);
        break;
      }
    }
  }
  return fusableDependences;
}

/// Tile the fused loops in the root operation by setting the tile sizes for
/// all other loops to zero (those will be tiled later).
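/// For example (illustrative): if the computed tile sizes for a matmul are
/// (32, 64, 16) but only loops {0, 1} are fusable, the root operation is tiled
/// with sizes (32, 64, 0); a tile size of zero leaves the corresponding loop
/// untiled.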
static FailureOr<TiledLinalgOp>
tileRootOperation(OpBuilder &b, LinalgOp op, ArrayRef<Value> tileSizeVector,
                  const LinalgTilingOptions &options,
                  const std::set<unsigned> &fusedLoops) {
  SmallVector<Value, 4> tileSizes(tileSizeVector.begin(), tileSizeVector.end());
  auto zero = b.create<arith::ConstantIndexOp>(op.getLoc(), 0);
  for (unsigned i = 0, e = tileSizes.size(); i != e; ++i)
    if (!fusedLoops.count(i))
      tileSizes[i] = zero;
  LinalgTilingOptions tileFusedLoopsOptions = options;
  tileFusedLoopsOptions.setTileSizes(tileSizes);
  // TODO: Propagate RewriterBase everywhere.
  IRRewriter rewriter(b);
  return tileLinalgOp(rewriter, op, tileFusedLoopsOptions);
}

/// Fuse the operations in `fusionCandidates` with `tiledOp`. The latter is
/// expected to be a tiled operation such that it is valid to fuse all
/// operations in `fusionCandidates`, i.e. to move the operations within the
/// inter-tile loops of `tiledOp`.
static SmallVector<LinalgOp, 1>
fuseOperations(OpBuilder &b, LinalgOp rootOp, TiledLinalgOp tiledLinalgOp,
               ArrayRef<LinalgOp> fusionCandidates,
               const FusableOpDependencesTy &fusableDependences,
               const std::set<unsigned> &fusedLoops) {
  LinalgOp tiledOp = tiledLinalgOp.op;
  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPoint(tiledOp);

  DenseMap<unsigned, Range> fusedLoopsAndRanges;
  for (unsigned loop : fusedLoops) {
    ShapeDimension shapeDim = getShapeDefiningLoopRange(tiledOp, loop, true);
    fusedLoopsAndRanges[loop] = getRangeFromOperandShape(
        b, tiledOp.getLoc(), shapeDim.shape, shapeDim.dimension);
  }

  SmallVector<LinalgOp, 1> fusedOps(fusionCandidates.size());
  DenseMap<Operation *, LinalgOp> origOpToFusedOp;
  origOpToFusedOp[rootOp.getOperation()] = tiledOp;
  for (const auto &candidate : enumerate(llvm::reverse(fusionCandidates))) {
    LinalgOp origOp = candidate.value();
    LinalgOp fusedOp = fuse(b, origOp, fusedLoopsAndRanges);
    origOpToFusedOp[origOp.getOperation()] = fusedOp;
    fusedOps[fusionCandidates.size() - candidate.index() - 1] = fusedOp;

    // Prepare the builder for the next insertion point.
    auto guard = llvm::make_scope_exit([&]() { b.setInsertionPoint(fusedOp); });
    if (!origOp.hasTensorSemantics())
      continue;

    // If the producer-consumer operations are linalg operations on tensors,
    // the dependence is due to the value produced (as a result tensor) by the
    // producer and used in the consumer. The returned value of the fused op
    // needs to be made the operand of the tiled/fused consumer operation. By
    // construction the value returned by the producer is the value used by
    // the consumer.
    for (auto &dependence : fusableDependences.lookup(origOp.getOperation())) {
      if (dependence.dependenceType !=
          LinalgDependenceGraph::DependenceType::RAW)
        continue;

      unsigned resultIndex =
          dependence.getDependentOpViewResultNum().getValue();
      LinalgOp consumer = origOpToFusedOp.lookup(dependence.getIndexingOp());
      if (!consumer)
        continue;

      Value replacementValue = fusedOp.getOperation()->getResult(resultIndex);
      consumer.getOperation()->setOperand(
          dependence.getIndexingOpViewOperandNum().getValue(),
          replacementValue);
    }

    // At this point, all Linalg uses of the tensors produced by `origOp` have
    // been replaced. However, there may still be "output tensor"-like uses
    // coming from WAW dependencies.
    // All these uses are iter_args of the outermost loop (TODO: add a check).
    // Such iter_args uses serve 2 purposes:
    //  1. give a shape to the output;
    //  2. encode destructive updates that may be inplaceable by bufferization.
    // To keep the second type of information while letting the unfused op die
    // unused, we need to forward the producer output operand.
    if (auto forOp = dyn_cast<scf::ForOp>(tiledLinalgOp.loops.front())) {
      for (auto &operand : forOp.getIterOpOperands()) {
        if (auto opResult = operand.get().dyn_cast<OpResult>()) {
          if (opResult.getOwner() == origOp) {
            Value output =
                origOp.getOutputOperand(opResult.getResultNumber())->get();
            assert(output.getType().isa<RankedTensorType>());
            operand.set(output);
          }
        }
      }
    }
  }
  return fusedOps;
}

static FailureOr<TiledAndFusedLinalgOps>
tileAndFuseLinalgOpsImpl(OpBuilder &b, ArrayRef<LinalgOp> ops,
                         const LinalgDependenceGraph &dependenceGraph,
                         const LinalgTilingOptions &tilingOptions) {
  if (ops.size() < 2)
    return failure();
  LinalgOp rootOp = ops.back();
  if (!llvm::all_of(
          ops,
          [](LinalgOp linalgOp) { return linalgOp.hasBufferSemantics(); }) &&
      !llvm::all_of(ops, [](LinalgOp linalgOp) {
        return linalgOp.hasTensorSemantics();
      })) {
    rootOp.emitError(
        "unable to fuse operations that have tensor semantics with operations "
        "that have buffer semantics and vice versa.");
    return failure();
  }
  // TODO: Support interchange with tile + fuse. This might actually help do
  // better fusion.
  if (!tilingOptions.interchangeVector.empty()) {
    rootOp.emitRemark("unable to handle tile and fuse with interchange");
    return failure();
  }

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPoint(rootOp);

  // Find all the producers.
  LLVM_DEBUG(llvm::dbgs() << "findAllFusableDependences\n");
  FusableOpDependencesTy fusableDependences =
      findAllFusableDependences(ops, dependenceGraph);
  if (fusableDependences.empty()) {
    LLVM_DEBUG(llvm::dbgs() << "no fusable dependencies found\n");
    return failure();
  }

  TiledAndFusedLinalgOps ret;
  // Find the loops that can be tiled and fused.
  LLVM_DEBUG(llvm::dbgs() << "collectFusableLoops\n");
  ret.fusedLoopDims = collectFusableLoops(ops, fusableDependences);

  // If there are no fusable dependences or there are no tile+fusable loops,
  // just return.
  if (ret.fusedLoopDims.empty()) {
    LLVM_DEBUG(llvm::dbgs() << "no fusable loops found\n");
    return failure();
  }

  // Tile the fused loops in the last operation in the list.
  SmallVector<Value, 4> tileSizeVector =
      tilingOptions.tileSizeComputationFunction(b, rootOp);
  FailureOr<TiledLinalgOp> tiledRootOp = tileRootOperation(
      b, rootOp, tileSizeVector, tilingOptions, ret.fusedLoopDims);
  if (failed(tiledRootOp)) {
    rootOp.emitRemark("failed to tile the fused loops");
    return failure();
  }
  ret.op = tiledRootOp->op;
  ret.fusedLoops.assign(tiledRootOp->loops.begin(), tiledRootOp->loops.end());

  // Fuse the other operations into the fused inter-tile loops produced above.
  ret.fusedProducers = fuseOperations(b, rootOp, *tiledRootOp, ops.drop_back(),
                                      fusableDependences, ret.fusedLoopDims);

  return ret;
}

FailureOr<TiledAndFusedLinalgOps>
mlir::linalg::tileAndFuseLinalgOps(OpBuilder &b, ArrayRef<LinalgOp> ops,
                                   const LinalgDependenceGraph &dependenceGraph,
                                   const LinalgTilingOptions &tilingOptions) {
  switch (tilingOptions.loopType) {
  case LinalgTilingLoopType::Loops:
  case LinalgTilingLoopType::ParallelLoops:
  case LinalgTilingLoopType::TiledLoops:
    return tileAndFuseLinalgOpsImpl(b, ops, dependenceGraph, tilingOptions);
  default:
    break;
  }
  return failure();
}
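// Usage sketch (illustrative only; the variable names below are assumptions,
// not part of this file): callers pass the fusion candidates with the root
// (consumer) operation last, since the implementation treats `ops.back()` as
// the op that gets tiled and fuses `ops.drop_back()` into the resulting
// inter-tile loops. A driver would look roughly like:
//
//   Aliases aliases;
//   LinalgDependenceGraph dependenceGraph(aliases, {producer, consumer});
//   FailureOr<TiledAndFusedLinalgOps> tiledAndFused = tileAndFuseLinalgOps(
//       builder, {producer, consumer}, dependenceGraph, tilingOptions);
//   if (failed(tiledAndFused))
//     return failure();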