//===- Fusion.cpp - Implementation of linalg Fusion -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Fusion pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Dominance.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

#include <set>

#define DEBUG_TYPE "linalg-fusion"

using namespace mlir;
using namespace mlir::linalg;

using llvm::dbgs;

/// Implements a simple high-level fusion pass on linalg structured operations.
///
/// In each block, linalg ops are processed in reverse textual order.
/// Given a linalg op `O`, fusion occurs by:
///   1. inspecting the linalg ops that write into the views read by `O`. There
///      are 2 cases:
///      a) buffer case: use the SSA value of the views and a simple alias
///         analysis on subview ops to determine producer-consumer dependences;
///      b) tensor case: use SSA use-def chains on extract_slice ops;
///   2. greedily fusing the linalg ops that produce the subview/extract_slice;
///   3. inspecting the fused ops and determining whether they have any other
///      remaining LinalgOp uses. If not, the original producing linalg op is
///      erased.
///
/// More advanced use cases and analyses, as well as profitability heuristics,
/// are left for future work.
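///
/// For illustration, consider this hypothetical buffer-case input (all names
/// and shapes are made up):
///
///   linalg.fill(%cst, %C) : f32, memref<?x?xf32>           // producer
///   %sv = memref.subview %C[%i, %j][4, 4][1, 1]
///       : memref<?x?xf32> to memref<4x4xf32, #map>
///   linalg.matmul ins(%sv, %B : ...) outs(%D : ...)        // consumer
///
/// Here the fill is the last write of the view read through %sv, so fusion
/// clones a fill computing only that 4x4 tile just before the matmul; if the
/// original fill then has no remaining LinalgOp uses, it is erased.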

struct ShapeDimension {
  Value shape;
  unsigned dimension;
};

// Given an `op`, returns the first (`shape`, `dimension`) pair that identifies
// the loop range at `loopDepth`. The semantics of the loopToOperandRangesMaps
// guarantee that at least one such dimension is found. If multiple candidates
// exist, they must agree by construction (i.e. have the same size) and we just
// return the first one.
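// For illustration (hypothetical shapes): for a matmul whose indexing map on
// the first input %A is affine_map<(i, j, k) -> (i, k)>, a query with
// loopDepth = 2 (the `k` loop) resolves to (`%A`, /*dimension=*/1).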
static ShapeDimension
getShapeDefiningLoopRange(LinalgOp op, unsigned loopDepth,
                          bool fromSubViewOpOnly = false) {
  // Iterate over the inputs and outputs in order.
  // Extract the subranges from the linearized ranges.
  for (OpOperand *opOperand : op.getInputAndOutputOperands()) {
    // The method `getRangeFromOperandShape` requires the value to be defined
    // by a SubViewOp or an ExtractSliceOp. If it is not, skip this operand.
    // TODO: The method should be adapted to get the values from
    // `ViewInterface`. The interface needs a `getOrCreateRanges` method which
    // currently returns a `linalg.range`. The fix here is to move this op to
    // `std` dialect and add the method to `ViewInterface`.
    if (fromSubViewOpOnly &&
        !isa_and_nonnull<memref::SubViewOp, tensor::ExtractSliceOp>(
            opOperand->get().getDefiningOp()))
      continue;

    AffineMap map = op.getTiedIndexingMap(opOperand);
    LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange I/O idx: "
                            << opOperand->getOperandNumber() << "\n");
    LLVM_DEBUG(llvm::dbgs()
               << "getShapeDefiningLoopRange map: " << map << "\n");
    SmallVector<Value, 8> shapeRanges(map.getNumResults(), nullptr);
    for (auto en : llvm::enumerate(map.getResults())) {
      auto dimExpr = en.value().dyn_cast<AffineDimExpr>();
      if (!dimExpr)
        continue;
      if (loopDepth == dimExpr.getPosition()) {
        LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange loopDepth: "
                                << loopDepth << "\n");
        LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange shape: "
                                << opOperand->get() << "\n");
        return ShapeDimension{opOperand->get(),
                              static_cast<unsigned>(en.index())};
      }
    }
  }
  llvm_unreachable("Expect to be able to extract a shape defining loop range");
}

// Return tiled operands for the fused producer op. When fusing into
// `linalg.tiled_loop` one has to update `input` and `output` arguments of the
// loop correspondingly.
// Each input tensor of the producer op has to be added to `inputs` of the
// `tiled_loop` if it is not present there already. Each output tensor has to
// be added either to `inputs` or to `outputs` of `linalg.tiled_loop` depending
// on whether the corresponding result is an input or an output to the loop.
//
// NOTE: This way of updating the arguments of the `tiled_loop` assumes that
// the intermediate result is not used by any operation other than the
// consumer. A more generic way is to append all missing output tensors of the
// producer to the tiled loop outputs and hence modify the number of results,
// since we would need to add the intermediate results to `linalg.yield`. After
// that a canonicalization pass would move the unused output args of the
// `tiled_loop` to the `input` section.
static SmallVector<Value> getTiledOperands(OpBuilder &b, LinalgOp producer) {
  auto tiledLoop = dyn_cast<TiledLoopOp>(b.getBlock()->getParentOp());
  if (!tiledLoop) {
    // Collect the operand values; `getInputAndOutputOperands` returns
    // `OpOperand *`s, not `Value`s.
    SmallVector<Value> producerOperands;
    for (OpOperand *opOperand : producer.getInputAndOutputOperands())
      producerOperands.push_back(opOperand->get());
    return producerOperands;
  }

  SmallVector<Value> tiledOperands;
  assert(producer.hasTensorSemantics() &&
         "only fusion on tensors is currently supported for TiledLinalgOp");

  for (OpOperand *producerInput : producer.getInputOperands()) {
    OpOperand *addedInput = tiledLoop.findInputOperand(producerInput->get());
    if (addedInput == nullptr)
      addedInput = &tiledLoop.appendInputOperand(b, producerInput->get());
    BlockArgument addedBlockArg = tiledLoop.getTiedBlockArgument(*addedInput);
    tiledOperands.push_back(addedBlockArg);
  }
  for (OpOperand *producerOutput : producer.getOutputOperands()) {
    OpResult result = producer.getTiedOpResult(producerOutput);
    OpOperand *resultInputOperand = tiledLoop.findInputOperand(result);
    OpOperand *resultOutputOperand = tiledLoop.findOutputOperand(result);
    assert((resultInputOperand != nullptr) ^ (resultOutputOperand != nullptr) &&
           "the result should be present in `input` or `output` args of "
           "`tiled_loop`");

    bool isInput = resultInputOperand;
    int opNumber = isInput ? resultInputOperand->getOperandNumber()
                           : resultOutputOperand->getOperandNumber();

    OpOperand *addedOutput = tiledLoop.findOutputOperand(producerOutput->get());
    if (addedOutput == nullptr)
      addedOutput =
          isInput ? &tiledLoop.appendInputOperand(b, producerOutput->get())
                  : &tiledLoop.appendOutputOperand(b, producerOutput->get());

    OpOperand &resultOperand = tiledLoop->getOpOperand(opNumber);
    auto addedBlockArg = tiledLoop.getTiedBlockArgument(*addedOutput);
    auto resultOperandBlockArg = tiledLoop.getTiedBlockArgument(resultOperand);
    resultOperandBlockArg.replaceAllUsesWith(addedBlockArg);
    tiledLoop.eraseOperand(b, resultOperand);
    tiledOperands.push_back(addedBlockArg);
  }
  return tiledOperands;
}

/// Fuses the producer by cloning the `producer`. The `fusedLoopsAndRanges`
/// provides the loop range information for the fused loops. The rest are
/// obtained from the producer itself, since they are not tiled + fused.
static LinalgOp fuse(OpBuilder &b, LinalgOp producer,
                     const DenseMap<unsigned, Range> &fusedLoopsAndRanges) {
  SmallVector<Value, 8> ivs, tileSizes, sizeBounds;
  SmallVector<Range, 8> loopRanges;
  Location loc = producer.getLoc();
  auto zero = b.create<ConstantIndexOp>(loc, 0);
  auto one = b.create<ConstantIndexOp>(loc, 1);

  for (unsigned i = 0, e = producer.getNumLoops(); i < e; ++i) {
    auto it = fusedLoopsAndRanges.find(i);
    if (it != fusedLoopsAndRanges.end()) {
      ivs.push_back(it->second.offset);
      tileSizes.push_back(it->second.size);
      sizeBounds.push_back(nullptr);
      loopRanges.push_back(it->second);
      LLVM_DEBUG(llvm::dbgs() << "tiled loop#" << i << " with LoopRange "
                              << loopRanges.back() << "\n");
    } else {
      auto shapeDim = getShapeDefiningLoopRange(producer, i);
      Value dim = b.createOrFold<memref::DimOp>(loc, shapeDim.shape,
                                                shapeDim.dimension);
      tileSizes.push_back(zero);
      sizeBounds.push_back(dim);
      loopRanges.push_back(Range{zero, dim, one});
      LLVM_DEBUG(llvm::dbgs() << "full loop#" << i << " with LoopRange "
                              << loopRanges.back() << "\n");
    }
  }

  SmallVector<Value, 8> clonedShapes;
  clonedShapes.reserve(producer.getNumInputsAndOutputs());

  // Compute subranges for all tensor input/output operands.
  clonedShapes.append(makeTiledShapes(b, loc, producer,
                                      getTiledOperands(b, producer), ivs,
                                      tileSizes, sizeBounds));

  // Iterate over the results in order.
  // Extract the subtensor type from the linearized range.
  // Since we do not enforce any canonicalizations on the fly, this is always
  // fully dynamic at construction time.
  SmallVector<Type, 4> resultTypes;
  resultTypes.reserve(producer->getNumResults());
  for (RankedTensorType t : producer.getOutputTensorTypes()) {
    unsigned rank = t.getRank();
    SmallVector<int64_t, 4> staticOffsetsVector(
        rank, ShapedType::kDynamicStrideOrOffset);
    SmallVector<int64_t, 4> staticSizesVector(rank, ShapedType::kDynamicSize);
    SmallVector<int64_t, 4> staticStridesVector(
        rank, ShapedType::kDynamicStrideOrOffset);
    resultTypes.push_back(tensor::ExtractSliceOp::inferResultType(
        t, staticOffsetsVector, staticSizesVector, staticStridesVector));
  }

  Operation *clonedOp = producer.clone(b, loc, resultTypes, clonedShapes);
  // When the producer has index semantics, we have to transform the indices of
  // the producer according to the tiling of the consumer, i.e. offset them by
  // the values computed in `loopRanges`.
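  // For example (hypothetical IR), a `%i = linalg.index 0` in the cloned
  // producer body is rewritten to
  //   %i_shifted = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%i, %off0)
  // where %off0 is the offset of fused loop 0, and all other uses of %i are
  // redirected to %i_shifted.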
  if (producer.hasIndexSemantics()) {
    assert(clonedOp->getNumRegions() == 1 &&
           clonedOp->getRegion(0).getBlocks().size() == 1 &&
           "expected producer to have one block.");
    // Shift all indices by the tile offset.
    Block &block = clonedOp->getRegion(0).front();
    for (IndexOp indexOp : block.getOps<IndexOp>()) {
      OpBuilder::InsertionGuard g(b);
      b.setInsertionPointAfter(indexOp);
      AffineExpr index, offset;
      bindDims(b.getContext(), index, offset);
      AffineApplyOp applyOp = b.create<AffineApplyOp>(
          indexOp.getLoc(), index + offset,
          ValueRange{indexOp.getResult(), loopRanges[indexOp.dim()].offset});
      indexOp.getResult().replaceAllUsesExcept(applyOp, applyOp);
    }
  }

  return clonedOp;
}

/// Get the loop range for a dimension `dim` based on the `shapedOperand`. It
/// is expected to be defined by a subview op or an extract_slice op.
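/// For instance (hypothetical IR), given
///   %sv = memref.subview %A[%o0, %o1][%s0, %s1][1, 1] : ...
/// a query with dim = 1 yields Range{%o1, %s1, 1}.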
static Range getRangeFromOperandShape(OpBuilder &b, Location loc,
                                      Value shapedOperand, unsigned dim) {
  Operation *shapeProducingOp = shapedOperand.getDefiningOp();
  if (auto subViewOp = dyn_cast<memref::SubViewOp>(shapeProducingOp))
    return subViewOp.getOrCreateRanges(b, loc)[dim];
  if (auto sliceOp = dyn_cast<tensor::ExtractSliceOp>(shapeProducingOp))
    return sliceOp.getOrCreateRanges(b, loc)[dim];
  llvm_unreachable("SubviewOp or ExtractSliceOp expected");
}

/// Fuses the producer into the loop immediately enclosing the consumer.
/// This is achieved by "recomputing" the producer at the time it
/// is needed just before the consumer.
static LinalgOp fuse(OpBuilder &b, LinalgOp producerOp, AffineMap producerMap,
                     OpOperand &consumerOpOperand) {
  LLVM_DEBUG(llvm::dbgs() << "Producer map: " << producerMap << "\n");
  DenseMap<unsigned, Range> fusedLoopsAndRanges;
  Value shapedOperand = consumerOpOperand.get();
  for (auto en : llvm::enumerate(producerMap.getResults())) {
    unsigned posInProducerLoop = en.value().cast<AffineDimExpr>().getPosition();
    fusedLoopsAndRanges[posInProducerLoop] = getRangeFromOperandShape(
        b, consumerOpOperand.getOwner()->getLoc(), shapedOperand, en.index());
  }
  return fuse(b, producerOp, fusedLoopsAndRanges);
}

// Encode structural fusion safety preconditions.
// Some of these will be lifted in the future with better analysis.
static bool isStructurallyFusableProducer(LinalgOp producer, Value consumedView,
                                          LinalgOp consumer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  if (producer.getNumOutputs() != 1) {
    LLVM_DEBUG(llvm::dbgs() << "\nNot structurally fusable (multi-output)");
    return false;
  }
  // Only fuse when the producer block dominates.
  DominanceInfo dom(producer.getOperation());
  if (!dom.dominates(producer->getBlock(), consumer->getBlock())) {
    LLVM_DEBUG(
        llvm::dbgs()
        << "\nNot structurally fusable (producer block does not dominate)");
    return false;
  }
  return true;
}

bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
                                             LinalgOp consumer,
                                             Value consumedView,
                                             LinalgOp producer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  // Make some simple structural checks that alleviate the need for more
  // complex analyses.
  if (!isStructurallyFusableProducer(producer, consumedView, consumer)) {
    LLVM_DEBUG(llvm::dbgs() << "\n***Not static last write due to structure:\t"
                            << *producer.getOperation());
    return false;
  }
  // Check for any interleaved write to consumedView.
  if (!graph.findCoveringWrites(producer, consumer, consumedView).empty()) {
    LLVM_DEBUG(llvm::dbgs() << "\n***Not fusable due to interleaved write:\t"
                            << *producer.getOperation());
    return false;
  }
  return true;
}

bool mlir::linalg::isFusableInto(const LinalgDependenceGraph &graph,
                                 LinalgOp consumer, Value consumedView,
                                 LinalgOp producer) {
  assert(producer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(consumer.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  if (!isProducerLastWriteOfView(graph, consumer, consumedView, producer))
    return false;
  // Check for any fusion-preventing dependence on any shape read or written.
  if (!graph.findCoveringDependences(producer, consumer).empty()) {
    LLVM_DEBUG(llvm::dbgs()
               << "\n***Not fusable due to an interleaved dependence:\t"
               << *producer.getOperation());
    return false;
  }
  if (auto convOp = dyn_cast<linalg::ConvOp>(producer.getOperation())) {
    // TODO: add a level of indirection to linalg.generic.
    if (convOp.padding())
      return false;
  }
  if (auto convOp = dyn_cast<linalg::ConvOp>(consumer.getOperation())) {
    // TODO: add a level of indirection to linalg.generic.
    if (convOp.padding())
      return false;
  }
  return true;
}

/// For `consumer` with buffer semantics, find the Linalg operation on buffers
/// that is the last writer of `consumerOpOperand`. For now the fusable
/// dependence is returned as an instance of the `dependenceGraph`.
static Optional<LinalgDependenceGraph::LinalgDependenceGraphElem>
findFusableProducer(OpOperand &consumerOpOperand,
                    const LinalgDependenceGraph &dependenceGraph) {
  LLVM_DEBUG(llvm::dbgs() << "findFusableProducer for: "
                          << consumerOpOperand.get() << " @"
                          << consumerOpOperand.getOperandNumber() << " in "
                          << *consumerOpOperand.getOwner() << "\n");
  LinalgOp consumerOp = dyn_cast<LinalgOp>(consumerOpOperand.getOwner());
  if (!consumerOp)
    return {};

  // Only consider RAW and WAW atm.
  for (auto depType : {
           LinalgDependenceGraph::DependenceType::RAW,
           LinalgDependenceGraph::DependenceType::WAW,
       }) {
    LLVM_DEBUG(llvm::dbgs()
               << "Dependencies into: " << *consumerOp.getOperation() << "\n");
    for (auto dependence : llvm::make_filter_range(
             dependenceGraph.getDependencesInto(consumerOp, depType),
             [&](LinalgDependenceGraph::LinalgDependenceGraphElem elem) {
               LLVM_DEBUG(llvm::dbgs() << "Inspect dependence btw: "
                                       << elem.getIndexingValue() << " and "
                                       << elem.getDependentValue() << "\n");
               Value v = elem.getIndexingValue();
               Optional<unsigned> operandNum =
                   elem.getIndexingOpViewOperandNum();
               return isa<LinalgOp>(elem.getDependentOp()) &&
                      v == consumerOpOperand.get() && operandNum &&
                      operandNum.getValue() ==
                          consumerOpOperand.getOperandNumber();
             })) {
      // The consumer consumes this view; `isStructurallyFusableProducer` also
      // checks whether it is a strict subview of the producer view.
      auto producer = cast<LinalgOp>(dependence.getDependentOp());
      LLVM_DEBUG(llvm::dbgs()
                 << "\n"
                 << LinalgDependenceGraph::getDependenceTypeStr(depType)
                 << " producer: " << *dependence.getDependentOp()
                 << " view: " << dependence.getDependentValue() << "\n");

      // If the producer and consumer have tensor semantics, the only
      // dependence between them is a RAW dependence and they are fusable by
      // construction. For buffer semantics, additional checks are needed.
      if (producer.hasBufferSemantics() && consumerOp.hasBufferSemantics() &&
          isFusableInto(dependenceGraph, consumerOp, consumerOpOperand.get(),
                        producer))
        return dependence;
      if (producer.hasTensorSemantics() && consumerOp.hasTensorSemantics()) {
        assert(dependence.dependenceType ==
               LinalgDependenceGraph::DependenceType::RAW);
        return dependence;
      }
    }
  }
  return {};
}

Optional<FusionInfo>
mlir::linalg::fuseProducerOfBuffer(OpBuilder &b, OpOperand &consumerOpOperand,
                                   const LinalgDependenceGraph &graph) {
  Optional<LinalgDependenceGraph::LinalgDependenceGraphElem> fusableDependence =
      findFusableProducer(consumerOpOperand, graph);
  if (!fusableDependence)
    return llvm::None;

  LinalgOp producerOp = dyn_cast<LinalgOp>(fusableDependence->getDependentOp());
  if (!producerOp)
    return llvm::None;

  // If producer is already in the same block as consumer, we are done.
  if (consumerOpOperand.get().getParentBlock() ==
      fusableDependence->getDependentValue().getParentBlock())
    return llvm::None;

  Optional<AffineMap> producerMap =
      fusableDependence->getDependentOpViewIndexingMap();
  if (!producerMap)
    return llvm::None;

  // Must be a subview to guarantee there are loops we can fuse into.
  auto subView = consumerOpOperand.get().getDefiningOp<memref::SubViewOp>();
  if (!subView) {
    LLVM_DEBUG(llvm::dbgs() << "\nNot fusable (not a subview)");
    return llvm::None;
  }

  // Fuse `producer` just before `consumer`.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(consumerOpOperand.getOwner());
  LLVM_DEBUG(llvm::dbgs() << "Fuse into consumer: "
                          << *consumerOpOperand.getOwner() << "\n");

  auto fusedProducer = fuse(b, producerOp, *producerMap, consumerOpOperand);
  return FusionInfo{producerOp, fusedProducer};
}

/// Walk back the use-def chain through scf::For yields.
/// Sets `opResult` if it finds a producer LinalgOp.

// TODO(ravishankarm, ntv): This can be moved into the dependence graphs
// dependence tracking since the dependence tracking is similar to what is done
// w.r.t. buffers.
static void getProducerOfTensor(Value tensor, OpResult &opResult) {
  if (!tensor.getType().isa<RankedTensorType>())
    return;

  while (true) {
    LLVM_DEBUG(llvm::dbgs() << "\ngetProducerOfTensor: " << tensor);
    if (auto linalgOp = tensor.getDefiningOp<LinalgOp>()) {
      opResult = tensor.cast<OpResult>();
      return;
    }
    if (auto sliceOp = tensor.getDefiningOp<tensor::ExtractSliceOp>()) {
      tensor = sliceOp.source();
      continue;
    }
    if (auto blockArg = tensor.dyn_cast<BlockArgument>()) {
      // A block argument never has a defining op; if it is an iter_arg of an
      // scf.for, continue the walk from the corresponding init operand
      // (argument 0 of the body block is the induction variable).
      if (auto forOp =
              dyn_cast<scf::ForOp>(blockArg.getOwner()->getParentOp())) {
        tensor =
            *(forOp.getIterOperands().begin() + blockArg.getArgNumber() - 1);
        continue;
      }
    }
    return;
  }
}

Optional<FusionInfo>
mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpOperand &consumerOpOperand) {
  Value inputTensor = consumerOpOperand.get();
  OpResult producerOpResult;
  getProducerOfTensor(inputTensor, producerOpResult);
  if (!producerOpResult) {
    LLVM_DEBUG(llvm::dbgs() << "\nUnable to find producer");
    return {};
  }
  return fuseProducerOfTensor(b, producerOpResult, consumerOpOperand);
}

Optional<FusionInfo>
mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult,
                                   OpOperand &consumerOpOperand) {
  auto producerOp = dyn_cast<LinalgOp>(producerOpResult.getOwner());
  if (!producerOp)
    return llvm::None;

  LinalgOp consumerOp = dyn_cast<LinalgOp>(consumerOpOperand.getOwner());
  if (!consumerOp)
    return llvm::None;

  Value inputTensor = consumerOpOperand.get();

  // Must be an extract_slice op to guarantee there are loops we can fuse into.
  auto sliceOp = inputTensor.getDefiningOp<tensor::ExtractSliceOp>();
  if (!sliceOp) {
    LLVM_DEBUG(llvm::dbgs()
               << "\nNot fusable, not an extract_slice op: " << inputTensor);
    return {};
  }

  // If producer is already in the same block as consumer, we are done.
  if (consumerOpOperand.get().getParentBlock() ==
      producerOpResult.getParentBlock())
    return {};

  // Insert fused `producer` just before `consumer`.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(consumerOp);
  LLVM_DEBUG(llvm::dbgs() << "Fuse into consumer: " << *consumerOp << "\n");
  OpOperand *opOperand =
      producerOp.getOutputOperand(producerOpResult.getResultNumber());
  LinalgOp fusedProducer =
      fuse(b, producerOp, producerOp.getTiedIndexingMap(opOperand),
           consumerOpOperand);

  // Replace use.
  // Canonicalizations are not guaranteed to have happened before constructing
  // `fusedProducer`. In the tensor case this can result in temporary type
  // mismatches. Insert a `tensor.cast` op to propagate the transformation
  // invariant that types are compatible.
  Value def = fusedProducer->getResult(producerOpResult.getResultNumber());
  Type consumerType = consumerOpOperand.get().getType();
  if (consumerType != def.getType())
    def = b.create<tensor::CastOp>(fusedProducer.getLoc(), consumerType, def);
  consumerOpOperand.set(def);
  return FusionInfo{cast<LinalgOp>(producerOpResult.getOwner()), fusedProducer};
}

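// A minimal usage sketch for the tensor entry point above (hypothetical
// driver code, not part of this file): fuse the producer of the first operand
// of `consumerOp` and erase the original producer if it became dead.
//
//   OpBuilder b(consumerOp);
//   OpOperand &operand = consumerOp->getOpOperand(0);
//   if (Optional<FusionInfo> info = fuseProducerOfTensor(b, operand)) {
//     LinalgOp original = info->originalProducer;
//     if (original->use_empty())
//       original->erase();
//   }
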
/// Prune all dimensions that are of reduction iterator type from `map`.
static AffineMap pruneReductionDimsFromMap(ArrayRef<Attribute> iteratorTypes,
                                           AffineMap map) {
  llvm::SmallDenseSet<unsigned> projectedDims;
  for (auto attr : llvm::enumerate(iteratorTypes)) {
    if (!isParallelIterator(attr.value()))
      projectedDims.insert(attr.index());
  }
  return getProjectedMap(map, projectedDims);
}

/// Returns the mapping from iterations in the consumer that write to the same
/// location as the iterations in the producer. To do so, use
/// - indexing map of the fused view in the consumer : consumerIndexMap
/// - indexing map of the fused view in the producer : producerIndexMap
///     consumerLoopToProducerLoop =
///       inverse(producerIndexMap).compose(consumerIndexMap)
static Optional<AffineMap> getConsumerLoopToProducerLoopMap(
    LinalgDependenceGraph::LinalgDependenceGraphElem dependence) {
  auto producer = dyn_cast<LinalgOp>(dependence.getDependentOp());
  if (!producer)
    return None;

  Optional<AffineMap> producerIndexingMap =
      dependence.getDependentOpViewIndexingMap();
  Optional<AffineMap> consumerIndexingMap =
      dependence.getIndexingOpViewIndexingMap();
  if (!producerIndexingMap || !consumerIndexingMap)
    return None;

  AffineMap prunedProducerIndexingMap = pruneReductionDimsFromMap(
      producer.iterator_types().getValue(), *producerIndexingMap);
  if (!prunedProducerIndexingMap.isPermutation())
    return None;

  if (consumerIndexingMap->getNumResults() !=
      prunedProducerIndexingMap.getNumResults())
    return None;

  LLVM_DEBUG({
    llvm::dbgs() << "\t producerMap : ";
    producerIndexingMap->print(llvm::dbgs());
    llvm::dbgs() << "  pruned : ";
    prunedProducerIndexingMap.print(llvm::dbgs());
    llvm::dbgs() << "\n";
    llvm::dbgs() << "\t consumerMap : ";
    consumerIndexingMap->print(llvm::dbgs());
    llvm::dbgs() << "\n";
  });

  AffineMap invProducerIndexMap = inversePermutation(prunedProducerIndexingMap);
  if (!invProducerIndexMap)
    return None;

  return invProducerIndexMap.compose(*consumerIndexingMap);
}

/// Given a projected permutation `map`, returns true if the map changes the
/// order in which the fused loop dimensions appear.
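/// For example (illustrative values), with fusableLoops = {0, 1} the map
/// (d0, d1, d2) -> (d1, d0) transposes the fused dimensions and returns true,
/// whereas (d0, d1, d2) -> (d0, d2) with fusableLoops = {0} returns false.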
static bool doesTransposeAccess(AffineMap map,
                                const std::set<unsigned> &fusableLoops) {
  Optional<unsigned> lastFusableLoop;
  for (unsigned pos : llvm::map_range(map.getResults(), [](AffineExpr expr) {
         return expr.cast<AffineDimExpr>().getPosition();
       })) {
    if (!fusableLoops.count(pos))
      continue;
    if (!lastFusableLoop) {
      lastFusableLoop = pos;
      continue;
    }
    if (pos <= lastFusableLoop.getValue())
      return true;
    lastFusableLoop = pos;
  }
  return false;
}

/// Returns the positions of the loops in `op` that can be tiled based on the
/// operations that are to be fused with it. For example, in a
///
///   linalg.matmul ins(%a, %b : ...) outs(%c : ...)
///
/// if the producer of %a needs to be fused with this op, only the `i` loop of
/// the matmul can be tiled while fusing. If the producers of %a and %b are to
/// be fused, then no loops can be tiled while fusing. The conditions used are:
/// 1. Only parallel loops can be used for tile + fuse. Find the number of
///    common outer parallel loops between the op and its producers being
///    fused.
/// 2. Of the parallel loops, only some can be fused: those whose iteration
///    space only touches one tile of the fused operation. This is because the
///    producer (which is writing the fused subview) has update semantics.
///
/// Since an inverse computation is needed, we need to consider the projection
/// of the producerIndexMap w.r.t the parallel loops. The actual fusable loops
/// are the dimensions of the consumerLoopToProducerLoop map that correspond to
/// parallel loops and appear in the result of the map.
///
/// Example 1:
///   linalg.fill(%cst, %c)
///   linalg.matmul ins(%a, %b) outs(%c)
///     Number of parallel loops : 2
///     producerIndexMap = affine_map<(i, j) -> (i, j)>
///     consumerIndexMap = affine_map<(i, j, k) -> (i, j)>
///     consumerLoopToProducerLoop = affine_map<(i, j, k) -> (i, j)>
///     Fused dimensions : i, j
///
/// Example 2:
///   linalg.matmul ins(%a, %b) outs(%c)
///   linalg.generic {indexing_maps = [affine_map<(i, j) -> (j, i)>, ...
///                   iterator_types = ["parallel", "parallel"]}
///     ins(%c) ...
///
///     Number of parallel loops = 2:
///     producerIndexMap (projected to parallel loops) =
///       affine_map<(i, j) -> (i, j)>
///     consumerLoopToProducerLoop = affine_map<(i, j) -> (j, i)>
///     Fused dimensions : i, j
///
/// Example 3:
///   linalg.copy(%s, %b)
///   linalg.matmul ins(%a, %b) outs(%c)
///
///   Number of parallel loops = 2
///   producerIndexMap : affine_map<(i, j) -> (i, j)>
///   consumerLoopToProducerLoops = affine_map<(i, j, k) -> (k, j)>
///     submap with only parallel loops = affine_map<(i, j) -> (j)>
///   Fused dimensions : j
static std::set<unsigned>
collectFusableLoops(ArrayRef<LinalgOp> ops,
                    const FusableOpDependencesTy &fusableDependences) {
  assert(!ops.empty());
  auto getNumOuterParallelLoops = [](LinalgOp linalgOp) {
    return linalgOp.iterator_types()
        .getValue()
        .take_while([](Attribute attr) -> bool {
          return attr.cast<StringAttr>().getValue() ==
                 getParallelIteratorTypeName();
        })
        .size();
  };

  size_t numOuterParallelLoops = getNumOuterParallelLoops(ops.back());
  for (auto op : ops.drop_back()) {
    numOuterParallelLoops =
        std::min(numOuterParallelLoops, getNumOuterParallelLoops(op));
  }

  std::set<unsigned> fusableLoops;
  auto range = llvm::seq<unsigned>(0, numOuterParallelLoops);
  fusableLoops.insert(range.begin(), range.end());

  for (auto op : reverse(ops)) {
    for (auto dependence : fusableDependences.lookup(op)) {
      LLVM_DEBUG({
        llvm::dbgs() << "\t fusable :";
        for (unsigned i : fusableLoops)
          llvm::dbgs() << " " << i;
        llvm::dbgs() << "\n";
      });

      Optional<AffineMap> consumerLoopToProducerLoop =
          getConsumerLoopToProducerLoopMap(dependence);
      if (!consumerLoopToProducerLoop) {
        op.emitRemark("failed to get map from consumer loop to producer loop");
        return {};
      }
      // TODO: This condition is only an implementation limitation. When fusing
      // the operation, if the accesses in the producer/consumer are transposes
      // of each other, the loop bounds for the tiled producer can be
      // manipulated accordingly. This requires some additional bookkeeping in
      // the implementation of tile+fuse that is deferred to later.
      if (doesTransposeAccess(*consumerLoopToProducerLoop, fusableLoops)) {
        op.emitRemark("unhandled fusion when fusion requires permutation");
        return {};
      }

      std::set<unsigned> candidates;
      for (AffineExpr expr : consumerLoopToProducerLoop->getResults()) {
        unsigned position = expr.cast<AffineDimExpr>().getPosition();
        if (fusableLoops.count(position))
          candidates.insert(position);
      }
      LLVM_DEBUG({
        llvm::dbgs() << "\t candidates :";
        for (unsigned i : candidates)
          llvm::dbgs() << " " << i;
        llvm::dbgs() << "\n";
      });
      if (candidates.empty())
        return {};
      std::swap(candidates, fusableLoops);
    }
  }

  return fusableLoops;
}

/// Find all dependences that are fusable.
FusableOpDependencesTy mlir::linalg::findAllFusableDependences(
    ArrayRef<LinalgOp> ops, const LinalgDependenceGraph &dependenceGraph) {
  FusableOpDependencesTy fusableDependences;
  DenseMap<Operation *, SmallVector<AffineMap, 1>> fusedProducerIndexingMap;
  for (LinalgOp op : reverse(ops)) {
    for (OpOperand *opOperand : op.getInputAndOutputOperands()) {
      Optional<LinalgDependenceGraph::LinalgDependenceGraphElem>
          fusableDependence = findFusableProducer(*opOperand, dependenceGraph);
      if (!fusableDependence)
        continue;
      LinalgOp producerOp =
          dyn_cast<LinalgOp>(fusableDependence->getDependentOp());
      if (!producerOp)
        continue;
      // Do not fuse dependences that are to operations not in the same basic
      // block. This avoids moving fused operations across loops that might
      // themselves carry dependences, making the fusion illegal.
      if (producerOp->getBlock() != op->getBlock())
        continue;

      // Make sure that the indexing map of the view used for fusion in the
      // producer is a projected permutation.
      Optional<AffineMap> producerMap =
          fusableDependence->getDependentOpViewIndexingMap();
      Optional<AffineMap> consumerMap =
          fusableDependence->getIndexingOpViewIndexingMap();
      assert(
          consumerMap &&
          "unable to find indexing map of operand/result of indexing OpView");
      fusedProducerIndexingMap[producerOp.getOperation()].push_back(
          *consumerMap);
      if (!producerMap || !producerMap->isProjectedPermutation() ||
          !consumerMap->isProjectedPermutation())
        continue;

      fusableDependences[producerOp.getOperation()].push_back(
          *fusableDependence);
    }
  }
  // TODO: Currently fusion would not be legal if the fusable dependence is to
  // the same producer but different indexing map in the consumer. Fix this, but
  // in the meantime disallow such a fusion.
  for (auto useIndexingMapsList : fusedProducerIndexingMap) {
    AffineMap map1 = useIndexingMapsList.second.front();
    for (AffineMap map2 :
         ArrayRef<AffineMap>(useIndexingMapsList.second).drop_front()) {
      if (map1 != map2) {
        fusableDependences.erase(useIndexingMapsList.first);
        break;
      }
    }
  }
  return fusableDependences;
}

/// Tile the fused loops in the root operation by setting the tile sizes for
/// all other loops to zero (those will be tiled later).
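/// For instance (illustrative values), with tileSizeVector = [16, 32, 8] and
/// fusedLoops = {0, 1}, the root op is tiled with sizes [16, 32, 0].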
static Optional<TiledLinalgOp>
tileRootOperation(OpBuilder &b, LinalgOp op, ArrayRef<Value> tileSizeVector,
                  const LinalgTilingOptions &options,
                  const std::set<unsigned> &fusedLoops) {
  SmallVector<Value, 4> tileSizes(tileSizeVector.begin(), tileSizeVector.end());
  auto zero = b.create<ConstantIndexOp>(op.getLoc(), 0);
  for (unsigned i = 0, e = tileSizes.size(); i != e; ++i)
    if (!fusedLoops.count(i))
      tileSizes[i] = zero;
  LinalgTilingOptions tileFusedLoopsOptions = options;
  tileFusedLoopsOptions.setTileSizes(tileSizes);
  return tileLinalgOp(b, op, tileFusedLoopsOptions);
}

/// Fuse the operations in `fusionCandidates` with `tiledOp`. The latter is
/// expected to be a tiled operation such that it is valid to fuse all
/// operations in `fusionCandidates`, i.e. to move them within the inter-tile
/// loops of `tiledOp`.
static SmallVector<LinalgOp, 1>
fuseOperations(OpBuilder &b, LinalgOp rootOp, TiledLinalgOp tiledLinalgOp,
               ArrayRef<LinalgOp> fusionCandidates,
               const FusableOpDependencesTy &fusableDependences,
               const std::set<unsigned> &fusedLoops) {
  LinalgOp tiledOp = tiledLinalgOp.op;
  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPoint(tiledOp);

  DenseMap<unsigned, Range> fusedLoopsAndRanges;
  for (unsigned loop : fusedLoops) {
    ShapeDimension shapeDim = getShapeDefiningLoopRange(tiledOp, loop, true);
    fusedLoopsAndRanges[loop] = getRangeFromOperandShape(
        b, tiledOp.getLoc(), shapeDim.shape, shapeDim.dimension);
  }

  SmallVector<LinalgOp, 1> fusedOps(fusionCandidates.size());
  DenseMap<Operation *, LinalgOp> origOpToFusedOp;
  origOpToFusedOp[rootOp.getOperation()] = tiledOp;
  for (auto candidate : enumerate(llvm::reverse(fusionCandidates))) {
    LinalgOp origOp = candidate.value();
    LinalgOp fusedOp = fuse(b, origOp, fusedLoopsAndRanges);
    origOpToFusedOp[origOp.getOperation()] = fusedOp;
    fusedOps[fusionCandidates.size() - candidate.index() - 1] = fusedOp;

    // Prepare the builder for the next insertion point.
    auto guard = llvm::make_scope_exit([&]() { b.setInsertionPoint(fusedOp); });
    if (!origOp.hasTensorSemantics())
      continue;

    // If the producer and consumer operations are linalg operations on
    // tensors, the dependence is due to the value produced (as a result
    // tensor) by the producer and used by the consumer. The value returned by
    // the fused op needs to be made the operand of the tiled/fused consumer
    // operation. By construction the value returned by the producer is the
    // value used by the consumer.
    for (auto &dependence : fusableDependences.lookup(origOp.getOperation())) {
      if (dependence.dependenceType !=
          LinalgDependenceGraph::DependenceType::RAW)
        continue;

      unsigned resultIndex =
          dependence.getDependentOpViewResultNum().getValue();
      LinalgOp consumer = origOpToFusedOp.lookup(dependence.getIndexingOp());
      if (!consumer)
        continue;

      Value replacementValue = fusedOp.getOperation()->getResult(resultIndex);
      consumer.getOperation()->setOperand(
          dependence.getIndexingOpViewOperandNum().getValue(),
          replacementValue);
    }

    // At this point, all Linalg uses of the tensors produced by `origOp` have
    // been replaced. However, there may still be "output tensor"-like uses
    // coming from WAW dependencies.
    // All these uses are iter_args of the outermost loop (TODO: add a check).
    // Such iter_args uses serve 2 purposes:
    //  1. give a shape to the output
    //  2. encode destructive updates that may be inplaceable by bufferization.
    // To keep the second type of information while letting the unfused op die
    // unused, we need to forward the producer output operand.
    if (auto forOp = dyn_cast<scf::ForOp>(tiledLinalgOp.loops.front())) {
      for (auto &operand : forOp.getIterOpOperands()) {
        if (auto opResult = operand.get().dyn_cast<OpResult>()) {
          if (opResult.getOwner() == origOp) {
            Value output =
                origOp.getOutputOperand(opResult.getResultNumber())->get();
            assert(output.getType().isa<RankedTensorType>());
            operand.set(output);
          }
        }
      }
    }
  }
  return fusedOps;
}

static Optional<TiledAndFusedLinalgOps>
tileAndFuseLinalgOpsImpl(OpBuilder &b, ArrayRef<LinalgOp> ops,
                         const LinalgDependenceGraph &dependenceGraph,
                         const LinalgTilingOptions &tilingOptions) {
  if (ops.size() < 2)
    return llvm::None;
  LinalgOp rootOp = ops.back();
  if (!llvm::all_of(
          ops,
          [](LinalgOp linalgOp) { return linalgOp.hasBufferSemantics(); }) &&
      !llvm::all_of(ops, [](LinalgOp linalgOp) {
        return linalgOp.hasTensorSemantics();
      })) {
    rootOp.emitError(
        "unable to fuse operations that have tensor semantics with operations "
        "that have buffer semantics and vice versa.");
    return llvm::None;
  }
  // TODO: Support interchange with tile + fuse. This might actually help do
  // better fusion.
  if (!tilingOptions.interchangeVector.empty()) {
    rootOp.emitRemark("unable to handle tile and fuse with interchange");
    return llvm::None;
  }

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPoint(rootOp);

  // Find all the producers.
  LLVM_DEBUG(llvm::dbgs() << "findAllFusableDependences\n");
  FusableOpDependencesTy fusableDependences =
      findAllFusableDependences(ops, dependenceGraph);
  if (fusableDependences.empty()) {
    LLVM_DEBUG(llvm::dbgs() << "no fusable dependencies found\n");
    return llvm::None;
  }

  TiledAndFusedLinalgOps ret;
  // Find the loops that can be tiled and fused.
  LLVM_DEBUG(llvm::dbgs() << "collectFusableLoops\n");
  ret.fusedLoopDims = collectFusableLoops(ops, fusableDependences);

  // If there are no fusable dependences or there are no tile+fusable loops,
  // just return.
  if (ret.fusedLoopDims.empty()) {
    LLVM_DEBUG(llvm::dbgs() << "no fusable loops found\n");
    return llvm::None;
  }

  // Tile the fused loops in the last operation in the list.
  SmallVector<Value, 4> tileSizeVector =
      tilingOptions.tileSizeComputationFunction(b, rootOp);
  Optional<TiledLinalgOp> tiledRootOp = tileRootOperation(
      b, rootOp, tileSizeVector, tilingOptions, ret.fusedLoopDims);
  if (!tiledRootOp) {
    rootOp.emitRemark("failed to tile the fused loops");
    return llvm::None;
  }
  ret.op = tiledRootOp->op;
  ret.fusedLoops.assign(tiledRootOp->loops.begin(), tiledRootOp->loops.end());

  // Fuse the other operations into the fused inter-tile loops produced above.
  ret.fusedProducers = fuseOperations(b, rootOp, *tiledRootOp, ops.drop_back(),
                                      fusableDependences, ret.fusedLoopDims);

  return ret;
}

Optional<TiledAndFusedLinalgOps>
mlir::linalg::tileAndFuseLinalgOps(OpBuilder &b, ArrayRef<LinalgOp> ops,
                                   const LinalgDependenceGraph &dependenceGraph,
                                   const LinalgTilingOptions &tilingOptions) {
  switch (tilingOptions.loopType) {
  case LinalgTilingLoopType::Loops:
  case LinalgTilingLoopType::ParallelLoops:
  case LinalgTilingLoopType::TiledLoops:
    return tileAndFuseLinalgOpsImpl(b, ops, dependenceGraph, tilingOptions);
  default:;
  }
  return llvm::None;
}
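
// Sketch of how a caller might drive tile-and-fuse (hypothetical driver code,
// for illustration only; the root op must be last in `ops`):
//
//   Aliases aliases;
//   LinalgDependenceGraph graph =
//       LinalgDependenceGraph::buildDependenceGraph(aliases, funcOp);
//   LinalgTilingOptions options =
//       LinalgTilingOptions().setTileSizes({16, 16}).setLoopType(
//           LinalgTilingLoopType::Loops);
//   if (Optional<TiledAndFusedLinalgOps> res =
//           tileAndFuseLinalgOps(b, {producerOp, rootOp}, graph, options)) {
//     // res->op is the tiled root; res->fusedProducers were cloned into the
//     // inter-tile loops and the originals may now be dead.
//   }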