//===- Fusion.cpp - Implementation of linalg Fusion -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Fusion pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/FoldUtils.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "linalg-fusion"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;

using folded_std_constant_index = FoldedValueBuilder<ConstantIndexOp>;

using llvm::dbgs;

/// Implements a simple high-level fusion pass of linalg library operations.
///
/// In each block, linalg ops are processed in reverse textual order.
/// Given a linalg op `O`, fusion occurs by:
///   1. inspecting the linalg ops that write into the views read by `O`. This
///      uses the SSA value of the views and a simple subview/slice analysis to
///      determine producer-consumer dependences;
///   2. greedily fusing the linalg ops that produce the subviews/slices read
///      by `O`;
///   3. inspecting the fused ops and determining whether they have any other
///      remaining LinalgOp uses. If not, erasing the original producing
///      linalg op.
///
/// More advanced use cases, analyses as well as profitability heuristics are
/// left for future work.
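///
/// As a schematic, hand-written illustration (names, types and attributes are
/// hypothetical, not verbatim output of this pass): given a producer writing
/// `%T` and a consumer reading a tile of it,
///
///   linalg.generic(%A, %T) ...
///   %sv = subview %T[%i, %j] ...
///   linalg.generic(%sv, %C) ...
///
/// fusion clones the producer immediately before the consumer and restricts
/// it to the tile actually read, via subviews of its own operands:
///
///   %sv  = subview %T[%i, %j] ...
///   %svA = subview %A[%i, %j] ...
///   linalg.generic(%svA, %sv) ...
///   linalg.generic(%sv, %C) ...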

// Return a cloned version of `op` that operates on `loopRanges`, assumed to be
// a subset of the original loop ranges of `op`.
// This is achieved by applying the `loopToOperandRangesMaps` permutation maps
// to the `loopRanges` in order to obtain view ranges.
static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
                                    ArrayRef<SubViewOp::Range> loopRanges) {
  assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");
  auto maps = op.indexing_maps();
  SmallVector<Value, 8> clonedViews;
  clonedViews.reserve(op.getNumInputsAndOutputs());
  // Iterate over the inputs and outputs in order.
  // Extract the subranges from the linearized ranges.
  SmallVector<Value, 8> ios(op.getInputsAndOutputBuffers());
  for (auto en : llvm::enumerate(ios)) {
    unsigned idx = en.index();
    auto map = maps[idx].cast<AffineMapAttr>().getValue();
    LLVM_DEBUG(dbgs() << "map: " << map << "\n");
    Value view = en.value();
    SmallVector<SubViewOp::Range, 4> viewRanges(map.getNumResults());
    for (auto en2 : llvm::enumerate(map.getResults())) {
      unsigned d = en2.index();
      // loopToOperandRangesMaps are permutations-only.
      unsigned loopPos = en2.value().cast<AffineDimExpr>().getPosition();
      viewRanges[d] = loopRanges[loopPos];
      LLVM_DEBUG(dbgs() << "\ni,j: " << en.index() << ", " << en2.index()
                        << "\t"
                        << "loopPos: " << loopPos << "\t" << viewRanges[d]);
    }
    // Construct a new subview for the tile.
    unsigned rank = viewRanges.size();
    SmallVector<Value, 4> offsets, sizes, strides;
    offsets.reserve(rank);
    sizes.reserve(rank);
    strides.reserve(rank);
    for (auto r : viewRanges) {
      offsets.push_back(r.offset);
      sizes.push_back(r.size);
      strides.push_back(r.stride);
    }
    clonedViews.push_back(
        b.create<SubViewOp>(loc, view, offsets, sizes, strides));
  }
  auto operands = getAssumedNonViewOperands(op);
  clonedViews.append(operands.begin(), operands.end());

  Operation *clonedOp = op.clone(b, loc, clonedViews);
  // When the producer is an IndexedGenericOp, we have to transform its block
  // IV arguments according to the tiling of the consumer, i.e. offset them by
  // the values computed in `loopRanges`.
  if (auto indexedGenericOp = dyn_cast<IndexedGenericOp>(clonedOp)) {
    auto &block = indexedGenericOp.region().front();

    OpBuilder::InsertionGuard g(b);
    b.setInsertionPointToStart(&block);
    for (unsigned i = 0, e = indexedGenericOp.getNumLoops(); i < e; ++i) {
      Value oldIndex = block.getArgument(i);
      AddIOp newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
                                         loopRanges[i].offset);
      oldIndex.replaceAllUsesExcept(newIndex,
                                    SmallPtrSet<Operation *, 1>{newIndex});
    }
  }
  return clonedOp;
}
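
// For illustration (hypothetical map): with an indexing map
// (d0, d1, d2) -> (d2, d0) for some operand, the inner loop of
// cloneWithLoopRanges sets viewRanges = {loopRanges[2], loopRanges[0]}; those
// ranges then become the offsets/sizes/strides of the subview carved out of
// that operand.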

struct ViewDimension {
  Value view;
  unsigned dimension;
};

// Given an `op`, returns the first (`view`, `dimension`) pair that identifies
// the loop range at `loopDepth`. The semantics of the loopToOperandRangesMaps
// guarantees at least one such dimension is found. If multiple candidates exist
// they must agree by construction (i.e. have the same size) and we just return
// the first one.
static ViewDimension getViewDefiningLoopRange(LinalgOp op, unsigned loopDepth) {
  assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");
  auto maps = op.indexing_maps();
  // Iterate over the inputs and outputs in order.
  // Extract the subranges from the linearized ranges.
  SmallVector<Value, 8> ios(op.getInputsAndOutputBuffers());
  for (auto en : llvm::enumerate(ios)) {
    unsigned idx = en.index();
    auto map = maps[idx].cast<AffineMapAttr>().getValue();
    LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange I/O idx: " << idx << "\n");
    LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange map: " << map << "\n");
    Value view = en.value();
    SmallVector<Value, 8> viewRanges(map.getNumResults(), nullptr);
    for (auto en2 : llvm::enumerate(map.getResults())) {
      if (loopDepth == en2.value().cast<AffineDimExpr>().getPosition()) {
        LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange loopDepth: " << loopDepth
                          << "\n");
        LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange view: " << view << "\n");
        return ViewDimension{view, static_cast<unsigned>(en2.index())};
      }
    }
  }
  llvm_unreachable("Expect to be able to extract a view defining loop range");
}
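
// For example, for a linalg.matmul with indexing maps
//   (i, j, k) -> (i, k)  // A
//   (i, j, k) -> (k, j)  // B
//   (i, j, k) -> (i, j)  // C
// querying loopDepth == 2 (the reduction `k`) returns {A, 1}: dimension 1 of
// A is the first operand dimension indexed by `d2`, and std_dim(A, 1) then
// provides the size of that loop in `fuse` below.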

static LinalgOp fuse(Value producedView, LinalgOp producer, LinalgOp consumer,
                     unsigned consumerIdx, unsigned producerIdx,
                     OperationFolder *folder) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");

  auto subView = dyn_cast_or_null<SubViewOp>(
      consumer.getBuffer(consumerIdx).getDefiningOp());
  auto slice = dyn_cast_or_null<SliceOp>(
      consumer.getBuffer(consumerIdx).getDefiningOp());
  assert(subView || slice);
  (void)subView;
  (void)slice;

  // loopToOperandRangesMaps are permutations-only by construction:
  // we can always identify a data dimension with (at least) one loop
  // dimension.
  AffineMap producerMap =
      producer.indexing_maps()[producer.getNumInputs() + producerIdx]
          .cast<AffineMapAttr>()
          .getValue();
  LLVM_DEBUG(dbgs() << "Producer Idx: " << producerIdx
                    << ", producer map: " << producerMap << "\n");

  unsigned nPar = producer.getNumParallelLoops();
  unsigned nRed = producer.getNumReductionLoops();
  unsigned nWin = producer.getNumWindowLoops();
  SmallVector<SubViewOp::Range, 8> loopRanges(nPar + nRed + nWin);

  OpBuilder b(consumer.getOperation());
  auto loc = consumer.getLoc();
  // Iterate over dimensions identified by the producer map for `producerIdx`.
  // This defines a subset of the loop ranges that we need to complete later.
  for (auto en : llvm::enumerate(producerMap.getResults())) {
    unsigned posInProducerLoop = en.value().cast<AffineDimExpr>().getPosition();
    loopRanges[posInProducerLoop] =
        subView.getOrCreateRanges(b, loc)[en.index()];
  }

  // Iterate over all dimensions. For the dimensions not identified by the
  // producer map for `producerIdx`, we need to explicitly compute the view that
  // defines the loop ranges using the `producer`.
  for (unsigned i = 0, nLoops = loopRanges.size(); i < nLoops; ++i) {
    if (loopRanges[i].offset)
      LLVM_DEBUG(llvm::dbgs()
                 << "existing LoopRange: " << loopRanges[i] << "\n");
    else {
      auto viewDim = getViewDefiningLoopRange(producer, i);
      loopRanges[i] = SubViewOp::Range{folded_std_constant_index(folder, 0),
                                       std_dim(viewDim.view, viewDim.dimension),
                                       folded_std_constant_index(folder, 1)};
      LLVM_DEBUG(llvm::dbgs() << "new LoopRange: " << loopRanges[i] << "\n");
    }
  }

  return cloneWithLoopRanges(b, loc, producer, loopRanges);
}

// Encode structural fusion safety preconditions.
// Some of these will be lifted in the future with better analysis.
static bool isStructurallyFusableProducer(LinalgOp producer, Value consumedView,
                                          LinalgOp consumer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  if (producer.getNumOutputs() != 1) {
    LLVM_DEBUG(dbgs() << "\nNot structurally fusable (multi-output)");
    return false;
  }
  // Only fuse when the producer block dominates.
  DominanceInfo dom(producer.getOperation());
  if (!dom.dominates(producer.getOperation()->getBlock(),
                     consumer.getOperation()->getBlock())) {
    LLVM_DEBUG(
        dbgs()
        << "\nNot structurally fusable (producer block does not dominate)");
    return false;
  }
  return true;
}

bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
                                             LinalgOp consumer,
                                             Value consumedView,
                                             LinalgOp producer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  // Make some simple structural checks that alleviate the need for more
  // complex analyses.
  if (!isStructurallyFusableProducer(producer, consumedView, consumer)) {
    LLVM_DEBUG(dbgs() << "\n***Not static last write due to structure:\t"
                      << *producer.getOperation());
    return false;
  }
  // Check for any interleaved write to consumedView.
  if (!graph.findCoveringWrites(producer, consumer, consumedView).empty()) {
    LLVM_DEBUG(dbgs() << "\n***Not fusable due to interleaved write:\t"
                      << *producer.getOperation());
    return false;
  }
  return true;
}

bool mlir::linalg::isFusableInto(const LinalgDependenceGraph &graph,
                                 LinalgOp consumer, Value consumedView,
                                 LinalgOp producer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  if (!isProducerLastWriteOfView(graph, consumer, consumedView, producer))
    return false;
  // Check for any fusion-preventing dependence on any view read or written.
  if (!graph.findCoveringDependences(producer, consumer).empty()) {
    LLVM_DEBUG(dbgs() << "\n***Not fusable due to an interleaved dependence:\t"
                      << *producer.getOperation());
    return false;
  }
  if (auto convOp = dyn_cast<linalg::ConvOp>(producer.getOperation())) {
    // TODO: add a level of indirection to linalg.generic.
    if (convOp.padding())
      return false;
  }
  if (auto convOp = dyn_cast<linalg::ConvOp>(consumer.getOperation())) {
    // TODO: add a level of indirection to linalg.generic.
    if (convOp.padding())
      return false;
  }
  return true;
}

static bool isSameSubView(Value a, Value b) {
  if (a == b)
    return true;
  auto sva = a.getDefiningOp<SubViewOp>();
  auto svb = b.getDefiningOp<SubViewOp>();
  if (!sva || !svb)
    return false;
  if (!isSameSubView(sva.getViewSource(), svb.getViewSource()))
    return false;
  if (sva.getType() != svb.getType())
    return false;
  if (sva.getRank() != svb.getRank())
    return false;
  if (sva.getNumOperands() != svb.getNumOperands())
    return false;
  if (sva.static_offsets() != svb.static_offsets())
    return false;
  if (sva.static_sizes() != svb.static_sizes())
    return false;
  if (sva.static_strides() != svb.static_strides())
    return false;
  // Skip the "viewSource" operand.
  for (unsigned idx = 1, e = sva.getNumOperands(); idx != e; ++idx)
    if (sva.getOperand(idx) != svb.getOperand(idx))
      return false;
  return true;
}

static Optional<FusionInfo>
fuseProducerOfDep(OpBuilder &b, LinalgOp consumer, unsigned consumerIdx,
                  const LinalgDependenceGraph &graph, OperationFolder *folder,
                  LinalgDependenceGraph::DependenceType depType) {
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  LLVM_DEBUG(dbgs() << "\nStart examining consumer: "
                    << *consumer.getOperation());
  for (auto dependence : graph.getDependencesInto(consumer, depType)) {
    LLVM_DEBUG(dbgs() << "\n***Consider producer:\t"
                      << *dependence.dependentOpView.op << "\n");
    auto producer = cast<LinalgOp>(dependence.dependentOpView.op);

    // Check that the dependence is indeed on the input `consumerIdx` view.
    auto consumedView = dependence.indexingView;
    if (!isSameSubView(consumer.getBuffer(consumerIdx), consumedView))
      continue;

    // The consumer consumes this view; `isStructurallyFusableProducer` also
    // checks whether it is a strict subview of the producer view.
    auto producedView = dependence.dependentOpView.view;
    auto producerIdx = producer.getIndexOfOutputBuffer(producedView).getValue();
    // `consumerIdx` and `producerIdx` exist by construction.
    LLVM_DEBUG(dbgs() << "\n"
                      << LinalgDependenceGraph::getDependenceTypeStr(depType)
                      << "producer: " << *producer.getOperation() << " view: "
                      << producedView << " output index: " << producerIdx);

    // Must be a subview or a slice to guarantee there are loops we can fuse
    // into.
    auto subView = consumedView.getDefiningOp<SubViewOp>();
    auto slice = consumedView.getDefiningOp<SliceOp>();
    if (!subView && !slice) {
      LLVM_DEBUG(dbgs() << "\nNot fusable (not a subview or slice)");
      continue;
    }

    // Simple fusability checks.
    if (!isFusableInto(graph, consumer, consumedView, producer))
      continue;

    // Fuse `producer` just before `consumer`.
    OpBuilder::InsertionGuard g(b);
    b.setInsertionPoint(consumer.getOperation());
    ScopedContext scope(b, consumer.getLoc());
    LLVM_DEBUG(dbgs() << "Fuse into consumer: " << *consumer << "\n");
    auto fusedProducer = fuse(producedView, producer, consumer, consumerIdx,
                              producerIdx, folder);

    return FusionInfo{producer, fusedProducer};
  }
  return llvm::None;
}

// Only consider RAW and WAW dependences for the moment.
Optional<FusionInfo> mlir::linalg::fuseProducerOf(
    OpBuilder &b, LinalgOp consumer, unsigned consumerIdx,
    const LinalgDependenceGraph &graph, OperationFolder *folder) {
  SmallVector<LinalgDependenceGraph::DependenceType, 4> deps = {
      LinalgDependenceGraph::DependenceType::RAW,
      LinalgDependenceGraph::DependenceType::WAW,
  };
  for (auto dep : deps) {
    if (auto res =
            fuseProducerOfDep(b, consumer, consumerIdx, graph, folder, dep))
      return res;
  }
  return llvm::None;
}

static void fuseLinalgOpsGreedily(FuncOp f) {
  LLVM_DEBUG(f.print(dbgs() << "\nBefore linalg-fusion: \n"));

  OpBuilder b(f);
  OperationFolder folder(f.getContext());
  DenseSet<Operation *> eraseSet;

  // Save the original Linalg ops; we only want to make a pass over those.
  SmallVector<Operation *, 8> linalgOps;
  f.walk([&](LinalgOp op) {
    if (op.hasBufferSemantics())
      linalgOps.push_back(op);
  });

  // TODO: LinalgDependenceGraph should be able to update itself.
  // The current naive and expensive reconstruction of the graph should be
  // removed.
  for (auto *op : llvm::reverse(linalgOps)) {
    for (unsigned id = 0, e = LinalgOp(op).getNumInputsAndOutputBuffers();
         id < e; ++id) {
      linalg::Aliases aliases;
      linalg::LinalgDependenceGraph graph(aliases, linalgOps);
      if (auto info = fuseProducerOf(b, op, id, graph, &folder)) {
        auto *originalOp = info->originalProducer.getOperation();
        eraseSet.insert(originalOp);
        auto *originalOpInLinalgOpsVector =
            std::find(linalgOps.begin(), linalgOps.end(), originalOp);
        *originalOpInLinalgOpsVector = info->fusedProducer.getOperation();
      }
    }
  }
  // The `fuseProducerOf` function performs structural checks and in particular
  // that no covering read or write exists between the consumer and the
  // producer. As a consequence, the only fusions that may occur preserve
  // subsequent dependences and are guaranteed by construction to produce the
  // whole view. We may thus erase the producer once it is fused.
  for (auto *e : eraseSet)
    e->erase();
  LLVM_DEBUG(f.print(dbgs() << "\nAfter linalg-fusion: \n"));
}

//====---------------------------------------------------------------------===//
// Fusion on tensor operations.
//====---------------------------------------------------------------------===//
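//
// As a hand-written sketch (names and syntax abridged; not verbatim output of
// these patterns): two pointwise linalg.generic ops on tensors,
//
//   %0 = linalg.generic {...} %arg0 : tensor<?x?xf32> -> tensor<?x?xf32>
//   %1 = linalg.generic {...} %0 : tensor<?x?xf32> -> tensor<?x?xf32>
//
// are fused into a single linalg.generic whose region concatenates both
// payloads, so the intermediate tensor %0 is never materialized.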

namespace {

/// Implementation of fusion of generic ops and indexed_generic ops.
struct FuseGenericOpsOnTensors {
  static bool isFusible(LinalgOp producer, LinalgOp consumer,
                        unsigned consumerIdx) {
    // Verify that
    // - the producer has all "parallel" iterator types.
    if (producer.getNumParallelLoops() != producer.getNumLoops())
      return false;

    // Get the consumer index map. The number of results of the consumer index
    // map must match the number of loops of the producer.
    AffineMap consumerIndexMap = consumer.getIndexingMap(consumerIdx);
    if (consumerIndexMap.getNumResults() != producer.getNumLoops())
      return false;

    // Finally the index_map for the result must be invertible. For now just
    // verify it is a permutation.
    AffineMap producerResultIndexMap = producer.getOutputIndexingMap(0);
    return producerResultIndexMap.isPermutation();
  }

  static Operation *fuse(LinalgOp producer, LinalgOp consumer,
                         unsigned consumerIdx, PatternRewriter &rewriter,
                         OperationFolder *folder = nullptr) {
    if (!isFusible(producer, consumer, consumerIdx))
      return nullptr;

    unsigned numFusedOperands = producer.getOperation()->getNumOperands() +
                                consumer.getOperation()->getNumOperands() - 1;

    // Compute the fused operands list.
    SmallVector<Value, 2> fusedOperands;
    fusedOperands.reserve(numFusedOperands);
    auto consumerOperands = consumer.getOperation()->getOperands();
    auto producerOperands = producer.getOperation()->getOperands();
    fusedOperands.assign(consumerOperands.begin(),
                         std::next(consumerOperands.begin(), consumerIdx));
    fusedOperands.append(producerOperands.begin(), producerOperands.end());
    fusedOperands.append(std::next(consumerOperands.begin(), consumerIdx + 1),
                         consumerOperands.end());

    // Compute indexing_maps for the fused operation. The indexing_maps for the
    // operands of the consumer that aren't fused stay the same. The
    // indexing_maps for the producer operands need to be computed based on the
    // indexing_map of the operand at consumerIdx in the consumer.
    SmallVector<Attribute, 4> fusedIndexMaps;
    auto consumerIndexMaps = consumer.indexing_maps();
    fusedIndexMaps.reserve(fusedOperands.size() +
                           consumer.getOperation()->getNumResults());
    fusedIndexMaps.assign(consumerIndexMaps.begin(),
                          std::next(consumerIndexMaps.begin(), consumerIdx));
    // Compute indexing maps for the producer args in the fused operation.
    computeProducerOperandIndex(
        producer, consumer.getInputIndexingMap(consumerIdx), fusedIndexMaps);

    // Append the indexing maps for the remaining consumer operands.
    fusedIndexMaps.append(std::next(consumerIndexMaps.begin(), consumerIdx + 1),
                          consumerIndexMaps.end());

    // Generate the fused op.
    LinalgOp fusedOp;
    if (isa<GenericOp>(producer.getOperation()) &&
        isa<GenericOp>(consumer.getOperation())) {
      fusedOp =
          rewriter
              .create<GenericOp>(
                  rewriter.getUnknownLoc(),
                  consumer.getOperation()->getResultTypes(), fusedOperands,
                  rewriter.getI64IntegerAttr(fusedOperands.size()),
                  rewriter.getI64IntegerAttr(
                      consumer.getOperation()->getNumResults()),
                  rewriter.getArrayAttr(fusedIndexMaps),
                  consumer.iterator_types(),
                  /*doc=*/nullptr,
                  /*library_call=*/nullptr)
              .getOperation();
    } else {
      fusedOp =
          rewriter
              .create<IndexedGenericOp>(
                  rewriter.getUnknownLoc(),
                  consumer.getOperation()->getResultTypes(), fusedOperands,
                  rewriter.getI64IntegerAttr(fusedOperands.size()),
                  rewriter.getI64IntegerAttr(
                      consumer.getOperation()->getNumResults()),
                  rewriter.getArrayAttr(fusedIndexMaps),
                  consumer.iterator_types(),
                  /*doc=*/nullptr,
                  /*library_call=*/nullptr)
              .getOperation();
    }

    // Construct an AffineMap from consumer loops to producer loops.
    // consumer loop -> tensor index
    AffineMap consumerResultIndexMap =
        consumer.getInputIndexingMap(consumerIdx);
    // producer loop -> tensor index
    AffineMap producerResultIndexMap = producer.getOutputIndexingMap(0);
    // tensor index -> producer loop
    AffineMap invProducerResultIndexMap =
        inversePermutation(producerResultIndexMap);
    assert(invProducerResultIndexMap &&
           "expected producer result indexing map to be invertible");
    // consumer loop -> producer loop
    AffineMap consumerToProducerLoopsMap =
        invProducerResultIndexMap.compose(consumerResultIndexMap);

    generateFusedRegion(rewriter, fusedOp, producer, consumer,
                        consumerToProducerLoopsMap, consumerIdx,
                        consumer.getNumLoops());
    return fusedOp;
  }

private:
  /// Append to `fusedOpIndexingMapAttrs` the indexing maps for the operands of
  /// the `producer` to use in the fused operation given the indexing map of the
  /// result of the producer in the consumer.
  static void computeProducerOperandIndex(
      LinalgOp producer, AffineMap fusedConsumerArgIndexMap,
      SmallVectorImpl<Attribute> &fusedOpIndexingMapAttrs) {
    // The indexing map in the consumer op (fusedConsumerArgIndexMap) is a map
    // from consumer loop -> consumer arg tensor index/producer result tensor
    // index. The fused loop is the same as the consumer loop. For each producer
    // arg the indexing map to be computed is a map from consumer loop ->
    // producer arg tensor index.

    AffineMap producerResultIndexMap = producer.getOutputIndexingMap(0);
    // producerResultIndexMap is a map from producer loop -> tensor index.
    // Compute the inverse to get a map from tensor index -> producer loop.
    // The inverse is a map from producer result tensor index -> producer loop.
    AffineMap invProducerResultIndexMap =
        inversePermutation(producerResultIndexMap);
    assert(invProducerResultIndexMap &&
           "expected producer result indexing map to be invertible");
    for (unsigned argNum : llvm::seq<unsigned>(0, producer.getNumInputs())) {
      // argMap is a map from producer loop -> producer arg tensor index.
      AffineMap argMap = producer.getInputIndexingMap(argNum);

      // Compose argMap with invProducerResultIndexMap to get a map from
      // producer result tensor index -> producer arg tensor index.
      AffineMap t1 = argMap.compose(invProducerResultIndexMap);

      // Composing t1 with fusedConsumerArgIndexMap gives an indexing map from
      // consumer loop/fused loop -> producer arg tensor index.
      AffineMap indexingMap = t1.compose(fusedConsumerArgIndexMap);
      fusedOpIndexingMapAttrs.push_back(AffineMapAttr::get(indexingMap));
    }
  }
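
  // To make the composition above concrete (hypothetical maps): suppose the
  // producer writes its result with map (d0, d1) -> (d1, d0) and the consumer
  // reads that tensor with the identity map (d0, d1) -> (d0, d1). Then
  // invProducerResultIndexMap is (d0, d1) -> (d1, d0), and a producer arg read
  // with the identity map gets the fused-op map
  //   argMap.compose(inv).compose(consumerMap) = (d0, d1) -> (d1, d0),
  // i.e. that producer arg is now indexed in terms of the consumer's loops.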

  /// Generate the region of the fused operation. The region of the fused op
  /// must be empty.
  static void generateFusedRegion(PatternRewriter &rewriter, Operation *fusedOp,
                                  LinalgOp producer, LinalgOp consumer,
                                  AffineMap consumerToProducerLoopsMap,
                                  unsigned consumerIdx, unsigned nloops) {
    // Build the region of the fused op.
    Block &producerBlock = producer.getOperation()->getRegion(0).front();
    Block &consumerBlock = consumer.getOperation()->getRegion(0).front();
    Block *fusedBlock = new Block();
    fusedOp->getRegion(0).push_back(fusedBlock);
    BlockAndValueMapping mapper;
    OpBuilder::InsertionGuard guard(rewriter);
    rewriter.setInsertionPointToStart(fusedBlock);

    // The block arguments are
    //   [index_0, index_1, ... ,
    //    consumer_operand_0, ... , consumer_operand_(`consumerIdx`-1),
    //    producer_operand_0, ... , producer_operand_(n-1),
    //    consumer_operand_(`consumerIdx`+1), ... , consumer_operand_(m-1)],
    // where n is the number of producer operands and m is the number of
    // consumer operands.
    // If both `numProducerIndices` and `numConsumerIndices` are zero, this is
    // a generic op. In this case, there are no indices in the block arguments.
    unsigned numProducerIndices =
        isa<IndexedGenericOp>(producer.getOperation()) ? nloops : 0;
    unsigned numConsumerIndices =
        isa<IndexedGenericOp>(consumer.getOperation()) ? nloops : 0;
    // First, add all the indices to the block arguments.
    for (unsigned i = 0, e = std::max(numProducerIndices, numConsumerIndices);
         i < e; ++i)
      fusedBlock->addArgument(rewriter.getIndexType());
    // Map the arguments for the unmodified args from the consumer.
    for (auto consumerArg : llvm::enumerate(consumerBlock.getArguments())) {
      if (consumerArg.index() == consumerIdx + numConsumerIndices) {
        // Map the arguments for the args from the producer.
        for (auto producerArg : llvm::enumerate(producerBlock.getArguments())) {
          // If the producer is an indexed_generic op, map the indices from the
          // consumer loop to the producer loop (because the fusedOp is built
          // from the consumer's perspective).
          if (producerArg.index() < numProducerIndices) {
            auto newIndex = rewriter.create<mlir::AffineApplyOp>(
                producer.getLoc(),
                consumerToProducerLoopsMap.getSubMap(producerArg.index()),
                fusedBlock->getArguments().take_front(nloops));
            mapper.map(producerArg.value(), newIndex);
          } else {
            mapper.map(producerArg.value(),
                       fusedBlock->addArgument(producerArg.value().getType()));
          }
        }
        continue;
      }

      // If the consumer is an indexed_generic op, map the indices to the block
      // arguments directly. Otherwise, add an argument of the same type and
      // map to it.
      if (consumerArg.index() < numConsumerIndices) {
        mapper.map(consumerArg.value(),
                   fusedBlock->getArgument(consumerArg.index()));
      } else {
        mapper.map(consumerArg.value(),
                   fusedBlock->addArgument(consumerArg.value().getType()));
      }
    }

    // Add operations from the producer (except the yield operation) to the
    // fused op.
    for (auto &op : producerBlock.getOperations()) {
      if (auto yieldOp = dyn_cast<YieldOp>(op)) {
        // Lookup the value the yield operation is mapped to.
        Value yieldVal = yieldOp.getOperand(0);
        if (Value clonedVal = mapper.lookupOrNull(yieldVal))
          mapper.map(
              consumerBlock.getArgument(consumerIdx + numConsumerIndices),
              clonedVal);
        continue;
      }
      rewriter.clone(op, mapper);
    }
    for (auto &op : consumerBlock.getOperations())
      rewriter.clone(op, mapper);
  }
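
  // For example (counts hypothetical): fusing a two-input producer into the
  // second input of a three-input generic consumer (consumerIdx == 1, no
  // index arguments) produces a fused block whose arguments are
  //   [consumer_arg_0, producer_arg_0, producer_arg_1, consumer_arg_2],
  // and the consumer's old argument 1 is remapped to the clone of the value
  // the producer yielded.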
};
} // namespace

/// Linearize the expressions in `sourceMap` based on the `reassociationMaps`
/// provided, given the shape of the source tensor that corresponds to the
/// `sourceMap`. Note that this implicitly assumes that the tensors dimensions
/// are "row-major" ordered logically.
///
/// For example:
///
///   %0 = op ... : tensor<?x?x4x5xf32>
/// with output index_map `affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>`
///
/// and reshape:
///   %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
///                                  affine_map<(i, j, k, l) -> (j, k, l)>] :
///          tensor<?x?x4x5xf32> into tensor<?x?xf32>
///
/// would be rewritten into:
///   %0 = op ... : tensor<?x?x4x5xf32>
/// with output index_map
///   `affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>`
static AffineMap linearizeCollapsedDims(AffineMap sourceMap,
                                        ArrayRef<int64_t> sourceShape,
                                        ArrayRef<AffineMap> reassociationMaps) {
  SmallVector<AffineExpr, 4> resultExprs;
  resultExprs.reserve(reassociationMaps.size());
  ArrayRef<AffineExpr> sourceExprs = sourceMap.getResults();
  MLIRContext *context = sourceMap.getContext();

  // Compute the result exprs based on the reassociation maps.
  for (AffineMap map : reassociationMaps) {
    ArrayRef<AffineExpr> collapsedDims = map.getResults();
    // Assume that they are in-order and contiguous (already checked in
    // verifier).
    assert(!collapsedDims.empty());
    unsigned startDim =
        collapsedDims.front().cast<AffineDimExpr>().getPosition();
    AffineExpr linearizedExpr = makeCanonicalStridedLayoutExpr(
        sourceShape.slice(startDim, collapsedDims.size()),
        sourceExprs.slice(startDim, collapsedDims.size()), context);
    resultExprs.push_back(linearizedExpr);
  }
  return AffineMap::get(sourceMap.getNumDims(), sourceMap.getNumSymbols(),
                        resultExprs, context);
}

/// Checks if the `reshapeOp` can be fused with its consumer (if `asProducer`
/// is true) or its producer (if `asProducer` is false) given the indexing map
/// at its use.
static bool isTensorReshapeOpFusible(TensorReshapeOp reshapeOp,
                                     AffineMap useIndexMap, bool asProducer) {
  RankedTensorType returnType = reshapeOp.getResultType();
  RankedTensorType operandType = reshapeOp.getSrcType();
  // Reshape is fusible with its consumer (i.e. reshape as a producer) when its
  // operand is of lesser rank than the result. Fusing when the operand has
  // higher rank would require mods and divs in the indexing maps of the fused
  // op, which would make it non-invertible. Similarly, reshape is fused with
  // its producer (i.e. reshape as consumer) only if the return type has lesser
  // rank.
  if ((asProducer && returnType.getRank() < operandType.getRank()) ||
      (!asProducer && operandType.getRank() < returnType.getRank()))
    return false;
  return useIndexMap.isIdentity();
}
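
// For intuition (schematic): a reshape expanding tensor<?x20xf32> into
// tensor<?x4x5xf32> can be fused into a consumer of the expanded tensor by
// linearizing the consumer's indexing of the grouped dimensions (d1, d2) into
// d1 * 5 + d2 on the two-dimensional source, as linearizeCollapsedDims does
// above. The opposite, collapsing direction would require floordiv/mod
// expressions in the fused indexing maps, which is why
// isTensorReshapeOpFusible rejects it.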

namespace {
/// Implementation of fusion on tensor ops when producer is a TensorReshapeOp.
template <typename LinalgOpTy> struct FuseTensorReshapeOpAsProducer {
  static bool isFusible(TensorReshapeOp producer, LinalgOpTy consumer,
                        unsigned consumerIdx) {
    return isTensorReshapeOpFusible(
        producer, consumer.getInputIndexingMap(consumerIdx), true);
  }

  static Operation *fuse(TensorReshapeOp producer, LinalgOpTy consumer,
                         unsigned consumerIdx, PatternRewriter &rewriter,
                         OperationFolder *folder = nullptr) {
    if (!isFusible(producer, consumer, consumerIdx))
      return nullptr;

    // Compute the fused operands list.
    SmallVector<Value, 2> fusedOperands(consumer.operand_begin(),
                                        consumer.operand_end());
    fusedOperands[consumerIdx] = producer.src();

    // Compute indexing_maps for the fused operation. The indexing_maps for the
    // operands of the consumer that aren't fused stay the same.
    SmallVector<AffineMap, 4> fusedIndexMaps =
        llvm::to_vector<4>(llvm::map_range(
            consumer.indexing_maps(), [](Attribute attr) -> AffineMap {
              return attr.cast<AffineMapAttr>().getValue();
            }));

    // Compute the indexing map to use for the operand of the producer.
    AffineMap modifiedMap = linearizeCollapsedDims(
        fusedIndexMaps[consumerIdx], producer.getResultType().getShape(),
        producer.getReassociationMaps());
    for (AffineExpr expr : modifiedMap.getResults()) {
      if (!expr.isPureAffine())
        return nullptr;
    }
    fusedIndexMaps[consumerIdx] = modifiedMap;

    // Further check that the resulting index maps can be fused and
    // inverted. Without this the resultant op is not legal.
    if (!inversePermutation(concatAffineMaps(fusedIndexMaps)))
      return nullptr;

    SmallVector<Attribute, 4> indexMapAttrs = llvm::to_vector<4>(
        llvm::map_range(fusedIndexMaps, [](AffineMap map) -> Attribute {
          return AffineMapAttr::get(map);
        }));
    auto fusedOp = rewriter.create<LinalgOpTy>(
        rewriter.getUnknownLoc(), consumer.getResultTypes(), fusedOperands,
        rewriter.getI64IntegerAttr(fusedOperands.size()),
        rewriter.getI64IntegerAttr(consumer.getNumResults()),
        rewriter.getArrayAttr(indexMapAttrs), consumer.iterator_types(),
        /*doc=*/nullptr,
        /*library_call=*/nullptr);
    auto &fusedRegion = fusedOp.region();
    rewriter.cloneRegionBefore(consumer.region(), fusedRegion,
                               fusedRegion.begin());
    return fusedOp;
  }
};

/// Implementation of fusion on tensor ops when consumer is a TensorReshapeOp.
template <typename LinalgOpTy> struct FuseTensorReshapeOpAsConsumer {
  static bool isFusible(LinalgOpTy producer, TensorReshapeOp consumer,
                        unsigned consumerIdx) {
    return isTensorReshapeOpFusible(consumer, producer.getOutputIndexingMap(0),
                                    false);
  }

  static Operation *fuse(LinalgOpTy producer, TensorReshapeOp consumer,
                         unsigned consumerIdx, PatternRewriter &rewriter,
                         OperationFolder *folder = nullptr) {
    if (!isFusible(producer, consumer, consumerIdx))
      return nullptr;

    // The indexing_maps for the operands of the fused operation are the same
    // as those for the operands of the producer.
    SmallVector<AffineMap, 4> fusedIndexMaps =
        llvm::to_vector<4>(llvm::map_range(
            producer.indexing_maps(), [](Attribute attr) -> AffineMap {
              return attr.cast<AffineMapAttr>().getValue();
            }));
    // Compute the indexing map to use for the result of the producer.
    AffineMap modifiedMap = linearizeCollapsedDims(
        producer.getOutputIndexingMap(0), consumer.getSrcType().getShape(),
        consumer.getReassociationMaps());
    for (AffineExpr expr : modifiedMap.getResults()) {
      if (!expr.isPureAffine())
        return nullptr;
    }
    fusedIndexMaps.back() = modifiedMap;

    // Further check that the resulting index maps can be fused and
    // inverted. Without this the resultant op is not legal.
    if (!inversePermutation(concatAffineMaps(fusedIndexMaps)))
      return nullptr;

    SmallVector<Attribute, 4> indexMapAttrs = llvm::to_vector<4>(
        llvm::map_range(fusedIndexMaps, [](AffineMap map) -> Attribute {
          return AffineMapAttr::get(map);
        }));

    auto fusedOp = rewriter.create<LinalgOpTy>(
        rewriter.getUnknownLoc(), consumer.getResultType(),
        producer.getOperands(),
        rewriter.getI64IntegerAttr(producer.getNumOperands()),
        rewriter.getI64IntegerAttr(1), rewriter.getArrayAttr(indexMapAttrs),
        producer.iterator_types(),
        /*doc=*/nullptr,
        /*library_call=*/nullptr);
    auto &fusedRegion = fusedOp.region();
    rewriter.cloneRegionBefore(producer.region(), fusedRegion,
                               fusedRegion.begin());
    return fusedOp;
  }
};

/// Implementation of fusion on tensor ops when producer is a splat constant.
template <typename LinalgOpTy> struct FuseConstantOpAsProducer {
  static bool isFusible(ConstantOp producer, LinalgOpTy consumer,
                        unsigned consumerIdx) {
    return producer.getResult().getType().isa<RankedTensorType>() &&
           producer.value().template cast<DenseElementsAttr>().isSplat();
  }

  static Operation *fuse(ConstantOp producer, LinalgOpTy consumer,
                         unsigned consumerIdx, PatternRewriter &rewriter,
                         OperationFolder *folder = nullptr) {
    if (!isFusible(producer, consumer, consumerIdx))
      return nullptr;

    // The indexing_maps for the operands of the fused operation are the same
    // as those for the operands of the consumer without the indexing map at
    // consumerIdx.
    SmallVector<AffineMap, 4> fusedIndexMaps =
        llvm::to_vector<4>(llvm::map_range(
            consumer.indexing_maps(), [](Attribute attr) -> AffineMap {
              return attr.cast<AffineMapAttr>().getValue();
            }));
    fusedIndexMaps.erase(std::next(fusedIndexMaps.begin(), consumerIdx));

    // The operands list is the same as the consumer's, with the argument at
    // the constant index dropped.
    SmallVector<Value, 4> fusedOperands(consumer.operand_begin(),
                                        consumer.operand_end());
    fusedOperands.erase(std::next(fusedOperands.begin(), consumerIdx));

    // Create a constant scalar value from the splat constant.
    Value scalarConstant = rewriter.create<ConstantOp>(
        producer.getLoc(),
        producer.value().template cast<DenseElementsAttr>().getSplatValue());

    auto fusedOp = rewriter.create<LinalgOpTy>(
        rewriter.getUnknownLoc(), consumer.getResultTypes(), fusedOperands,
        rewriter.getI64IntegerAttr(consumer.getNumOperands() - 1),
        rewriter.getI64IntegerAttr(consumer.getNumResults()),
        rewriter.getAffineMapArrayAttr(fusedIndexMaps),
        consumer.iterator_types(),
        /*doc=*/nullptr,
        /*library_call=*/nullptr);

    // Map the block argument corresponding to the replaced argument with the
    // scalar constant.
    Region &consumerRegion = consumer.region();
    Block &entryBlock = *consumerRegion.begin();
    unsigned argIndex =
        entryBlock.getNumArguments() - consumer.getNumOperands() + consumerIdx;
    BlockAndValueMapping mapping;
    mapping.map(entryBlock.getArgument(argIndex), scalarConstant);
    Region &fusedRegion = fusedOp.region();
    rewriter.cloneRegionBefore(consumerRegion, fusedRegion, fusedRegion.begin(),
                               mapping);
    return fusedOp;
  }
};

} // namespace
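
// As an illustration of the constant-fusion case above (schematic, names
// hypothetical): given
//
//   %cst = constant dense<1.0> : tensor<4xf32>
//   %0 = linalg.generic {...} %cst, %arg0 { ... } : tensor<4xf32>, ...
//
// FuseConstantOpAsProducer drops %cst from the operand list, materializes the
// scalar `constant 1.0 : f32`, and remaps the corresponding block argument to
// it, so the fused op no longer reads a constant tensor at all.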

Operation *mlir::linalg::fuseTensorOps(PatternRewriter &rewriter,
                                       Operation *consumer,
                                       unsigned consumerIdx,
                                       OperationFolder *folder) {
  if (consumerIdx >= consumer->getNumOperands())
    return nullptr;
  Operation *producer = consumer->getOperand(consumerIdx).getDefiningOp();
  if (!producer || producer->getNumResults() != 1)
    return nullptr;

  // Fuse when consumer is GenericOp or IndexedGenericOp.
  if (isa<GenericOp, IndexedGenericOp>(consumer)) {
    auto linalgOpConsumer = cast<LinalgOp>(consumer);
    if (!linalgOpConsumer.hasTensorSemantics())
      return nullptr;
    if (isa<GenericOp, IndexedGenericOp>(producer)) {
      auto linalgOpProducer = cast<LinalgOp>(producer);
      if (linalgOpProducer.hasTensorSemantics())
        return FuseGenericOpsOnTensors::fuse(linalgOpProducer, linalgOpConsumer,
                                             consumerIdx, rewriter, folder);
    } else if (auto reshapeOpProducer = dyn_cast<TensorReshapeOp>(producer)) {
      if (auto genericOpConsumer = dyn_cast<GenericOp>(consumer)) {
        return FuseTensorReshapeOpAsProducer<GenericOp>::fuse(
            reshapeOpProducer, genericOpConsumer, consumerIdx, rewriter,
            folder);
      } else if (auto indexedGenericOpConsumer =
                     dyn_cast<IndexedGenericOp>(consumer)) {
        return FuseTensorReshapeOpAsProducer<IndexedGenericOp>::fuse(
            reshapeOpProducer, indexedGenericOpConsumer, consumerIdx, rewriter,
            folder);
      }
    } else if (auto constantOpProducer = dyn_cast<ConstantOp>(producer)) {
      if (auto genericOpConsumer = dyn_cast<GenericOp>(consumer)) {
        return FuseConstantOpAsProducer<GenericOp>::fuse(
            constantOpProducer, genericOpConsumer, consumerIdx, rewriter,
            folder);
      }
    }
    return nullptr;
  }

  // Fuse when consumer is a TensorReshapeOp.
  if (TensorReshapeOp reshapeOp = dyn_cast<TensorReshapeOp>(consumer)) {
    if (auto genericOpProducer = dyn_cast<GenericOp>(producer)) {
      if (genericOpProducer.hasTensorSemantics())
        return FuseTensorReshapeOpAsConsumer<GenericOp>::fuse(
            genericOpProducer, reshapeOp, consumerIdx, rewriter, folder);
    } else if (auto indexedGenericOpProducer =
                   dyn_cast<IndexedGenericOp>(producer)) {
      if (indexedGenericOpProducer.hasTensorSemantics())
        return FuseTensorReshapeOpAsConsumer<IndexedGenericOp>::fuse(
            indexedGenericOpProducer, reshapeOp, consumerIdx, rewriter, folder);
    }
    return nullptr;
  }

  return nullptr;
}

namespace {
/// Patterns to fuse a generic op with the producer of its operands.
template <typename LinalgOpTy>
struct FuseTensorOps : public OpRewritePattern<LinalgOpTy> {
  using OpRewritePattern<LinalgOpTy>::OpRewritePattern;

  LogicalResult matchAndRewrite(LinalgOpTy op,
                                PatternRewriter &rewriter) const override {
    // Find the first operand that is defined by another generic op on tensors.
    for (auto operandNum :
         llvm::seq<unsigned>(0, op.getOperation()->getNumOperands())) {
      Operation *producer =
          op.getOperation()->getOperand(operandNum).getDefiningOp();
      if (Operation *fusedOp = fuseTensorOps(rewriter, op, operandNum)) {
        rewriter.replaceOp(op, fusedOp->getResults());
        if (producer && llvm::all_of(producer->getResults(),
                                     [](Value val) { return val.use_empty(); }))
          rewriter.eraseOp(producer);
        return success();
      }
    }
    return failure();
  }
};

/// Pass that fuses generic ops on tensors. Used only for testing.
struct FusionOfTensorOpsPass
    : public LinalgFusionOfTensorOpsBase<FusionOfTensorOpsPass> {
  void runOnOperation() override {
    OwningRewritePatternList patterns;
    Operation *op = getOperation();
    populateLinalgTensorOpsFusionPatterns(op->getContext(), patterns);
    applyPatternsAndFoldGreedily(op->getRegions(), patterns);
  }
};

struct LinalgFusionPass : public LinalgFusionBase<LinalgFusionPass> {
  void runOnFunction() override { fuseLinalgOpsGreedily(getFunction()); }
};
} // namespace

void mlir::populateLinalgTensorOpsFusionPatterns(
    MLIRContext *context, OwningRewritePatternList &patterns) {
  patterns.insert<FuseTensorOps<GenericOp>, FuseTensorOps<IndexedGenericOp>,
                  FuseTensorOps<TensorReshapeOp>>(context);
}

std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgFusionPass() {
  return std::make_unique<LinalgFusionPass>();
}

std::unique_ptr<Pass> mlir::createLinalgFusionOfTensorOpsPass() {
  return std::make_unique<FusionOfTensorOpsPass>();
}